| text | repo_name | path | language | license | size | score |
| --- | --- | --- | --- | --- | --- | --- |
| string (lengths 6 to 947k) | string (lengths 5 to 100) | string (lengths 4 to 231) | string (1 class) | string (15 classes) | int64 (6 to 947k) | float64 (0 to 0.34) |
# PyMM - Python MP3 Manager
# Copyright (C) 2000 Pierre Hjalm <pierre.hjalm@dis.uu.se>
#
# Modified by Alexander Kanavin <ak@sensi.org>
# Removed ID tags support and added VBR support
# Used http://home.swipnet.se/grd/mp3info/ for information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
""" mp3.py
Reads information from an mp3 file.
This is a python port of code taken from the mpg123 input module of xmms.
"""
import struct
def header(buf):
return struct.unpack(">I",buf)[0]
def head_check(head):
if ((head & 0xffe00000L) != 0xffe00000L):
return 0
if (not ((head >> 17) & 3)):
return 0
if (((head >> 12) & 0xf) == 0xf):
return 0
if ( not ((head >> 12) & 0xf)):
return 0
if (((head >> 10) & 0x3) == 0x3):
return 0
if (((head >> 19) & 1) == 1 and ((head >> 17) & 3) == 3 and ((head >> 16) & 1) == 1):
return 0
if ((head & 0xffff0000L) == 0xfffe0000L):
return 0
return 1
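# Illustrative values (an assumption added for clarity, not part of the original
# module): a typical MPEG-1 Layer III header word such as 0xFFFB9000 (128 kbps,
# 44.1 kHz, no padding) passes every test above, while the first four bytes of an
# ID3v2 tag ("ID3\x04" == 0x49443304) fail the sync check:
#   head_check(0xFFFB9000L)  # -> 1
#   head_check(0x49443304L)  # -> 0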
def filesize(file):
""" Returns the size of file sans any ID3 tag
"""
f=open(file)
f.seek(0,2)
size=f.tell()
try:
f.seek(-128,2)
except:
f.close()
return 0
buf=f.read(3)
f.close()
if buf=="TAG":
size=size-128
if size<0:
return 0
else:
return size
table=[[
[0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448],
[0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384],
[0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320]],
[
[0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256],
[0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160],
[0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160]]]
def decode_header(head):
""" Decode the mp3 header and put the information in a frame structure
"""
freqs=[44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000]
fr={}
if head & (1 << 20):
if head & (1 << 19):
fr["lsf"]=0
else:
fr["lsf"]=1
fr["mpeg25"] = 0
else:
fr["lsf"] = 1
fr["mpeg25"] = 1
fr["lay"] = 4 - ((head >> 17) & 3)
if fr["mpeg25"]:
fr["sampling_frequency"] = freqs[6 + ((head >> 10) & 0x3)]
else:
fr["sampling_frequency"] = freqs[((head >> 10) & 0x3) + (fr["lsf"] * 3)]
fr["error_protection"] = ((head >> 16) & 0x1) ^ 0x1
fr["bitrate_index"] = ((head >> 12) & 0xf)
fr["bitrate"]=table[fr["lsf"]][fr["lay"]-1][fr["bitrate_index"]]
fr["padding"]=((head>>9) & 0x1)
fr["channel_mode"]=((head>>6) & 0x3)
if fr["lay"]==1:
fr["framesize"]=table[fr["lsf"]][0][fr["bitrate_index"]]*12000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]
fr["framesize"]=((fr["framesize"]+fr["padding"])<<2)-4
elif fr["lay"]==2:
fr["framesize"]=table[fr["lsf"]][1][fr["bitrate_index"]]*144000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]
fr["framesize"]=fr["framesize"]+fr["padding"]-1
elif fr["lay"]==3:
fr["framesize"]=table[fr["lsf"]][2][fr["bitrate_index"]]*144000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]<<fr["lsf"]
fr["framesize"]=fr["framesize"]+fr["padding"]-4
pass
else:
return 0
return fr
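# Worked example (added for illustration; the header word is an assumed sample,
# not taken from the original code): for head = 0xFFFB9000L, decode_header
# returns a frame dict with lsf=0, mpeg25=0, lay=3, bitrate_index=9,
# bitrate=128 (kbps), sampling_frequency=44100 and padding=0.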
def decode_vbr(buf):
vbr = {}
if buf[:4] != "Xing":
return 0
frames_flag = ord(buf[7]) & 1
if not frames_flag:
return 0
vbr["frames"] = header(buf[8:])
return vbr
def decode_synch_integer(buf):
return (ord(buf[0])<<21)+(ord(buf[1])<<14)+(ord(buf[2])<<7)+ord(buf[3])
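# Example of the ID3v2 "synchsafe" size arithmetic above (illustrative only):
# each byte contributes 7 bits, so the four bytes 0x00 0x00 0x02 0x01 decode to
# (0<<21) + (0<<14) + (2<<7) + 1 = 257, the tag size that detect_mp3 skips over.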
def detect_mp3(filename):
""" Determines whether this is an mp3 file and if so reads information
from it.
"""
try:
f=open(filename,"rb")
except:
return 0
try:
tmp=f.read(4)
except:
f.close()
return 0
if tmp[:3] == 'ID3':
try:
tmp = f.read(6)
f.seek(decode_synch_integer(tmp[2:])+10)
tmp=f.read(4)
except:
f.close()
return 0
try:
head=header(tmp)
except:
return 0
while not head_check(head):
# This is a real time waster, but an mp3 stream can start anywhere
# in a file so we have to search the entire file which can take a
# while for large non-mp3 files.
try:
buf=f.read(1024)
except:
f.close()
return 0
if buf=="":
f.close()
return 0
for i in range(0,len(buf)-1):
head=long(head)<<8;
head=head|ord(buf[i])
if head_check(head):
f.seek(i+1-len(buf),1)
break
mhead=decode_header(head)
if mhead:
# Decode VBR header if there's any.
pos = f.tell()
mhead["vbr"] = 0
if not mhead["lsf"]:
if mhead["channel_mode"] == 3:
vbrpos = 17
else:
vbrpos = 32
else:
if mhead["channel_mode"] == 3:
vbrpos = 9
else:
vbrpos = 17
try:
f.seek(vbrpos,1)
vbr = decode_vbr(f.read(12))
mhead["vbrframes"] = vbr["frames"]
if mhead["vbrframes"] >0:
mhead["vbr"] = 1
except:
pass
# We found something which looks like a MPEG-header
# We check the next frame too, to be sure
if f.seek(pos+mhead["framesize"]):
f.close()
return 0
try:
tmp=f.read(4)
except:
f.close()
return 0
if len(tmp)!=4:
f.close()
return 0
htmp=header(tmp)
if not (head_check(htmp) and decode_header(htmp)):
f.close()
return 0
f.close()
# If we have found a valid mp3 add some more info the head data.
if mhead:
mhead["filesize"]=filesize(filename)
if not mhead["vbr"]:
if mhead["bitrate"] and mhead["filesize"]:
mhead["time"]=int(float(mhead["filesize"])/(mhead["bitrate"]*1000)*8)
else:
mhead["time"]=0
else:
if mhead["filesize"] and mhead["sampling_frequency"]:
medframesize = float(mhead["filesize"])/float(mhead["vbrframes"])
if mhead["lsf"]:
coef = 12
else:
coef = 144
vbrrate = medframesize*mhead["sampling_frequency"]/(1000*coef)
mhead["time"]=int(float(mhead["filesize"])/(vbrrate*1000)*8)
mhead["vbrrate"] = int(vbrrate)
return mhead
else:
return 0
if __name__=="__main__":
import sys
mp3info=detect_mp3(sys.argv[1])
if mp3info:
print mp3info
else:
print "Not an mp3 file."
| JoeGermuska/worblehat | reference/pyarchive/pyarchive/mp3.py | Python | mit | 7,381 | 0.022897 |
from ..base import BaseShortener
from ..exceptions import ShorteningErrorException


class Shortener(BaseShortener):
    """
    TinyURL.com shortener implementation
    Example:
        >>> import pyshorteners
        >>> s = pyshorteners.Shortener()
        >>> s.tinyurl.short('http://www.google.com')
        'http://tinyurl.com/TEST'
        >>> s.tinyurl.expand('http://tinyurl.com/test')
        'http://www.google.com'
    """

    api_url = "http://tinyurl.com/api-create.php"

    def short(self, url):
        """Short implementation for TinyURL.com
        Args:
            url: the URL you want to shorten
        Returns:
            A string containing the shortened URL
        Raises:
            ShorteningErrorException: If the API returns an error as response
        """
        url = self.clean_url(url)
        response = self._get(self.api_url, params=dict(url=url))
        if response.ok:
            return response.text.strip()
        raise ShorteningErrorException(response.content)
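# Hypothetical usage sketch (not part of the original module): the names below
# assume the public pyshorteners API shown in the class docstring above, and
# the exception path implied by the relative import at the top of this file.
#
#     import pyshorteners
#     from pyshorteners.exceptions import ShorteningErrorException
#
#     s = pyshorteners.Shortener()
#     try:
#         print(s.tinyurl.short('http://www.google.com'))
#     except ShorteningErrorException as exc:
#         print('TinyURL rejected the request: %s' % exc)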
| ellisonleao/pyshorteners | pyshorteners/shorteners/tinyurl.py | Python | gpl-3.0 | 1,019 | 0 |
import autocomplete_light.shortcuts as autocomplete_light
from django import VERSION

from .models import *

try:
    import genericm2m
except ImportError:
    genericm2m = None

try:
    import taggit
except ImportError:
    taggit = None


class DjangoCompatMeta:
    if VERSION >= (1, 6):
        fields = '__all__'


class FkModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = FkModel


class OtoModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = OtoModel


class MtmModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = MtmModel


class GfkModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = GfkModel


if genericm2m:
    class GmtmModelForm(autocomplete_light.ModelForm):
        class Meta(DjangoCompatMeta):
            model = GmtmModel


if taggit:
    class TaggitModelForm(autocomplete_light.ModelForm):
        class Meta(DjangoCompatMeta):
            model = TaggitModel
| dsanders11/django-autocomplete-light | autocomplete_light/example_apps/basic/forms.py | Python | mit | 1,043 | 0.000959 |
import base64

def toBase64(s):
    return base64.b64encode(str(s))

def fromBase64(s):
    return base64.b64decode(str(s))
| HellTech/NAG_IoE_2016 | 30_HellTech_1602_1/08_Meteostanice_GUI_v2/Meteo2/base64function.py | Python | gpl-3.0 | 119 | 0.05042 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from openerp import models, fields, api, tools, _
class odisea_representative(models.Model):
"""Representative"""
_name = 'odisea.representative'
_description = 'Representative'
@api.multi
def _has_image(self):
return dict((p.id, bool(p.image)) for p in self)
name = fields.Char(string='Name', required=True)
cuit = fields.Char(string='CUIT', size=13)
title = fields.Many2one('res.partner.title', 'Title')
company = fields.Char(string='Company')
ref = fields.Char('Contact Reference', select=1)
website = fields.Char('Website', help="Website of Partner or Company")
comment = fields.Text('Notes')
category_id = fields.Many2many('res.partner.category', id1='partner_id', id2='category_id', string='Tags')
active = fields.Boolean('Active', default=True)
street = fields.Char('Street')
street2 = fields.Char('Street2')
zip = fields.Char('Zip', size=24, change_default=True)
city = fields.Char('City')
state_id = fields.Many2one("res.country.state", 'State', ondelete='restrict')
country_id = fields.Many2one('res.country', 'Country', ondelete='restrict')
email = fields.Char('Email')
phone = fields.Char('Phone')
fax = fields.Char('Fax')
mobile = fields.Char('Mobile')
birthdate = fields.Char('Birthdate')
function = fields.Char('Job Position')
is_company = fields.Boolean('Is a Company', help="Check if the contact is a company, otherwise it is a person")
use_parent_address = fields.Boolean('Use Company Address', help="Select this if you want to set company's address information for this contact")
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Image",
help="This field holds the image used as avatar for this contact, limited to 1024x1024px")
image_medium = fields.Binary(compute="_get_image",
string="Medium-sized image",
store= False,
help="Medium-sized image of this contact. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views.")
image_small = fields.Binary(compute="_get_image",
string="Small-sized image",
store= False,
help="Small-sized image of this contact. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required.")
has_image = fields.Boolean(compute=_has_image)
color = fields.Integer('Color Index')
@api.multi
def onchange_state(self, state_id):
if state_id:
state = self.env['res.country.state'].browse(state_id)
return {'value': {'country_id': state.country_id.id}}
return {}
@api.multi
def onchange_type(self, is_company):
value = {'title': False}
if is_company:
value['use_parent_address'] = False
domain = {'title': [('domain', '=', 'partner')]}
else:
domain = {'title': [('domain', '=', 'contact')]}
return {'value': value, 'domain': domain}
@api.one
@api.depends("image")
def _get_image(self):
""" calculate the images sizes and set the images to the corresponding
fields
"""
image = self.image
# check if the context contains the magic `bin_size` key
if self.env.context.get("bin_size"):
# refetch the image with a clean context
image = self.env[self._name].with_context({}).browse(self.id).image
data = tools.image_get_resized_images(image, return_big=True, avoid_resize_big=False)
self.image_big = data["image"]
self.image_medium = data["image_medium"]
self.image_small = data["image_small"]
return True
| vasconcelosfer/odoo-odisea | odisea/models/representative.py | Python | lgpl-3.0 | 4,870 | 0.015606 |
#!/usr/bin/env python
'''
Created on Jan 5, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import shutil
import subprocess
import sys
from optparse import OptionParser
# local imports
import chimerascan.pysam as pysam
from chimerascan.lib.feature import GeneFeature
from chimerascan.lib.seq import DNA_reverse_complement
from chimerascan.lib.config import JOB_ERROR, JOB_SUCCESS, ALIGN_INDEX, GENE_REF_PREFIX, GENE_FEATURE_FILE
from chimerascan.lib.base import check_executable
BASES_PER_LINE = 50
def split_seq(seq, chars_per_line):
pos = 0
newseq = []
while pos < len(seq):
if pos + chars_per_line > len(seq):
endpos = len(seq)
else:
endpos = pos + chars_per_line
newseq.append(seq[pos:endpos])
pos = endpos
return '\n'.join(newseq)
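# Example of the line wrapping performed above (illustrative): split_seq breaks a
# sequence into chunks of chars_per_line characters joined by newlines, e.g.
#   split_seq("ACGTACGTAC", 4)  # -> "ACGT\nACGT\nAC"
# bed12_to_fasta below uses it with BASES_PER_LINE (50) to wrap FASTA records.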
def bed12_to_fasta(gene_feature_file, reference_seq_file):
ref_fa = pysam.Fastafile(reference_seq_file)
for g in GeneFeature.parse(open(gene_feature_file)):
exon_seqs = []
error_occurred = False
for start, end in g.exons:
seq = ref_fa.fetch(g.chrom, start, end)
if not seq:
logging.warning("gene %s exon %s:%d-%d not found in reference" %
(g.tx_name, g.chrom, start, end))
error_occurred = True
break
exon_seqs.append(seq)
if error_occurred:
continue
# make fasta record
seq = ''.join(exon_seqs)
if g.strand == '-':
seq = DNA_reverse_complement(seq)
# break seq onto multiple lines
seqlines = split_seq(seq, BASES_PER_LINE)
yield (">%s range=%s:%d-%d gene=%s strand=%s\n%s" %
(GENE_REF_PREFIX + g.tx_name, g.chrom, start, end, g.strand, g.gene_name, seqlines))
ref_fa.close()
def create_chimerascan_index(output_dir, genome_fasta_file,
gene_feature_file,
bowtie_build_bin):
# create output dir if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logging.info("Created index directory: %s" % (output_dir))
# create FASTA index file
index_fasta_file = os.path.join(output_dir, ALIGN_INDEX + ".fa")
fh = open(index_fasta_file, "w")
# copy reference fasta file to output dir
logging.info("Adding reference genome to index...")
shutil.copyfileobj(open(genome_fasta_file), fh)
# extract sequences from gene feature file
logging.info("Adding gene models to index...")
for fa_record in bed12_to_fasta(gene_feature_file, genome_fasta_file):
print >>fh, fa_record
fh.close()
# copy gene bed file to index directory
shutil.copyfile(gene_feature_file, os.path.join(output_dir, GENE_FEATURE_FILE))
# index the combined fasta file
logging.info("Indexing FASTA file...")
fh = pysam.Fastafile(index_fasta_file)
fh.close()
# build bowtie index on the combined fasta file
logging.info("Building bowtie index...")
bowtie_index_name = os.path.join(output_dir, ALIGN_INDEX)
args = [bowtie_build_bin, index_fasta_file, bowtie_index_name]
if subprocess.call(args) != os.EX_OK:
logging.error("bowtie-build failed to create alignment index")
return JOB_ERROR
logging.info("chimerascan index created successfully")
return JOB_SUCCESS
def main():
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <reference_genome.fa> <gene_models.txt> <index_output_dir>")
parser.add_option("--bowtie-build-bin", dest="bowtie_build_bin", default="bowtie-build",
help="Path to 'bowtie-build' program")
options, args = parser.parse_args()
# check command line arguments
if len(args) < 3:
parser.error("Incorrect number of command line arguments")
ref_fasta_file = args[0]
gene_feature_file = args[1]
output_dir = args[2]
# check that input files exist
if not os.path.isfile(ref_fasta_file):
parser.error("Reference fasta file '%s' not found" % (ref_fasta_file))
if not os.path.isfile(gene_feature_file):
parser.error("Gene feature file '%s' not found" % (gene_feature_file))
# check that output dir is not a regular file
if os.path.exists(output_dir) and (not os.path.isdir(output_dir)):
parser.error("Output directory name '%s' exists and is not a valid directory" % (output_dir))
# check that bowtie-build program exists
if check_executable(options.bowtie_build_bin):
logging.debug("Checking for 'bowtie-build' binary... found")
else:
parser.error("bowtie-build binary not found or not executable")
# run main index creation function
retcode = create_chimerascan_index(output_dir, ref_fasta_file, gene_feature_file,
options.bowtie_build_bin)
sys.exit(retcode)
if __name__ == '__main__':
main() | madhavsuresh/chimerascan | chimerascan/deprecated/chimerascan_index_v1.py | Python | gpl-3.0 | 5,779 | 0.003634 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-09 08:07
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('georegion', '0001_initial_squashed_0004_auto_20180307_2026'),
    ]

    operations = [
        migrations.AlterField(
            model_name='georegion',
            name='part_of',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='georegion.GeoRegion', verbose_name='Part of'),
        ),
    ]
| stefanw/froide | froide/georegion/migrations/0002_auto_20180409_1007.py | Python | mit | 617 | 0.001621 |
# Copyright (c) 2013 Alan McIntyre
import httplib
import json
import decimal
import re
decimal.getcontext().rounding = decimal.ROUND_DOWN
exps = [decimal.Decimal("1e-%d" % i) for i in range(16)]
btce_domain = "btc-e.com"
all_currencies = ("btc", "usd", "rur", "ltc", "nmc", "eur", "nvc",
"trc", "ppc", "ftc", "xpm")
all_pairs = ("btc_usd", "btc_rur", "btc_eur", "ltc_btc", "ltc_usd",
"ltc_rur", "ltc_eur", "nmc_btc", "nmc_usd", "nvc_btc",
"nvc_usd", "usd_rur", "eur_usd", "trc_btc", "ppc_btc",
"ppc_usd", "ftc_btc", "xpm_btc")
max_digits = {"btc_usd": 3,
"btc_rur": 5,
"btc_eur": 5,
"ltc_btc": 5,
"ltc_usd": 6,
"ltc_rur": 5,
"ltc_eur": 3,
"nmc_btc": 5,
"nmc_usd": 3,
"nvc_btc": 5,
"nvc_usd": 3,
"usd_rur": 5,
"eur_usd": 5,
"trc_btc": 5,
"ppc_btc": 5,
"ppc_usd": 3,
"ftc_btc": 5,
"xpm_btc": 5}
min_orders = {"btc_usd": decimal.Decimal("0.01"),
"btc_rur": decimal.Decimal("0.1"),
"btc_eur": decimal.Decimal("0.1"),
"ltc_btc": decimal.Decimal("0.1"),
"ltc_usd": decimal.Decimal("0.1"),
"ltc_rur": decimal.Decimal("0.1"),
"ltc_eur": decimal.Decimal("0.1"),
"nmc_btc": decimal.Decimal("0.1"),
"nmc_usd": decimal.Decimal("0.1"),
"nvc_btc": decimal.Decimal("0.1"),
"nvc_usd": decimal.Decimal("0.1"),
"usd_rur": decimal.Decimal("0.1"),
"eur_usd": decimal.Decimal("0.1"),
"trc_btc": decimal.Decimal("0.1"),
"ppc_btc": decimal.Decimal("0.1"),
"ppc_usd": decimal.Decimal("0.1"),
"ftc_btc": decimal.Decimal("0.1"),
"xpm_btc": decimal.Decimal("0.1")}
def parseJSONResponse(response):
def parse_decimal(var):
return decimal.Decimal(var)
try:
r = json.loads(response, parse_float=parse_decimal,
parse_int=parse_decimal)
except Exception as e:
msg = "Error while attempting to parse JSON response:"\
" %s\nResponse:\n%r" % (e, response)
raise Exception(msg)
return r
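# Illustrative behaviour of the parser above (not part of the original module):
# numeric JSON fields come back as decimal.Decimal rather than float, e.g.
#   parseJSONResponse('{"last": 123.45}')["last"]  # -> Decimal('123.45')
# which avoids binary floating-point rounding when formatting order amounts.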
HEADER_COOKIE_RE = re.compile(r'__cfduid=([a-f0-9]{46})')
BODY_COOKIE_RE = re.compile(r'document\.cookie="a=([a-f0-9]{32});path=/;";')
class BTCEConnection:
def __init__(self, timeout=30):
self.conn = httplib.HTTPSConnection(btce_domain, timeout=timeout)
self.cookie = None
def close(self):
self.conn.close()
def getCookie(self):
self.cookie = ""
self.conn.request("GET", '/')
response = self.conn.getresponse()
setCookieHeader = response.getheader("Set-Cookie")
match = HEADER_COOKIE_RE.search(setCookieHeader)
if match:
self.cookie = "__cfduid=" + match.group(1)
match = BODY_COOKIE_RE.search(response.read())
if match:
if self.cookie != "":
self.cookie += '; '
self.cookie += "a=" + match.group(1)
def makeRequest(self, url, extra_headers=None, params="", with_cookie=False):
headers = {"Content-type": "application/x-www-form-urlencoded"}
if extra_headers is not None:
headers.update(extra_headers)
if with_cookie:
if self.cookie is None:
self.getCookie()
headers.update({"Cookie": self.cookie})
self.conn.request("POST", url, params, headers)
response = self.conn.getresponse().read()
return response
def makeJSONRequest(self, url, extra_headers=None, params=""):
response = self.makeRequest(url, extra_headers, params)
return parseJSONResponse(response)
def validatePair(pair):
if pair not in all_pairs:
if "_" in pair:
a, b = pair.split("_")
swapped_pair = "%s_%s" % (b, a)
if swapped_pair in all_pairs:
msg = "Unrecognized pair: %r (did you mean %s?)"
msg = msg % (pair, swapped_pair)
raise Exception(msg)
raise Exception("Unrecognized pair: %r" % pair)
def validateOrder(pair, trade_type, rate, amount):
validatePair(pair)
if trade_type not in ("buy", "sell"):
raise Exception("Unrecognized trade type: %r" % trade_type)
minimum_amount = min_orders[pair]
formatted_min_amount = formatCurrency(minimum_amount, pair)
if amount < minimum_amount:
msg = "Trade amount too small; should be >= %s" % formatted_min_amount
raise Exception(msg)
def truncateAmountDigits(value, digits):
quantum = exps[digits]
return decimal.Decimal(value).quantize(quantum)
def truncateAmount(value, pair):
return truncateAmountDigits(value, max_digits[pair])
def formatCurrencyDigits(value, digits):
s = str(truncateAmountDigits(value, digits))
dot = s.index(".")
while s[-1] == "0" and len(s) > dot + 2:
s = s[:-1]
return s
def formatCurrency(value, pair):
return formatCurrencyDigits(value, max_digits[pair])
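# Worked example for the helpers above (values are illustrative assumptions):
# max_digits["btc_usd"] is 3 and the module-wide rounding mode is ROUND_DOWN, so
#   truncateAmount("1.23456", "btc_usd")   # -> Decimal('1.234')
#   formatCurrency("1.23456", "btc_usd")   # -> '1.234'
#   formatCurrencyDigits("1.20000", 3)     # -> '1.2'  (trailing zeros trimmed)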
| blorenz/btce-api | btceapi/common.py | Python | mit | 5,264 | 0.00057 |
class a(object):
    pass

class b(a):
    pass

print a.__subclasses__()

class c(a):
    pass

print a.__subclasses__()
# Generated by Django 2.2.17 on 2021-01-31 06:11
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('conversation', '0032_twitterusertimeline'),
    ]

    operations = [
        migrations.AddField(
            model_name='twitterusertimeline',
            name='last_api_call',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| jeromecc/doctoctocbot | src/conversation/migrations/0033_twitterusertimeline_last_api_call.py | Python | mpl-2.0 | 423 | 0 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, print_function, division
| dawncold/expenditure-application | expenditure_application/__init__.py | Python | apache-2.0 | 90 | 0 |
# GUI for pyfdtd using PySide
# Copyright (C) 2012 Patrik Gebhardt
# Contact: grosser.knuff@googlemail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from newLayer import *
from newSimulation import *
| schansge/pyfdtd-gui | src/dialogs/__init__.py | Python | gpl-3.0 | 797 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.util import nest
class Plus1RNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
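# Tiny illustration of the contract documented above (added for clarity, not part
# of the original test): calling the cell on a batch of zeros with a zero state
# simply shifts both by one, e.g.
#   cell = Plus1RNNCell()
#   out, new_state = cell(tf.zeros([2, 5]), tf.zeros([2, 5]))
#   # out and new_state both evaluate to all-ones tensors of shape (2, 5)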
class DummyMultiDimensionalLSTM(tf.nn.rnn_cell.RNNCell):
"""LSTM Cell generating (output, new_state) = (input + 1, state + 1).
The input to this cell may have an arbitrary number of dimensions that follow
the preceding 'Time' and 'Batch' dimensions.
"""
def __init__(self, dims):
"""Initialize the Multi-dimensional LSTM cell.
Args:
dims: tuple that contains the dimensions of the output of the cell,
without including 'Time' or 'Batch' dimensions.
"""
if not isinstance(dims, tuple):
raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM"
"should be a tuple of ints.")
self._dims = dims
self._output_size = tf.TensorShape(self._dims)
self._state_size = (tf.TensorShape(self._dims), tf.TensorShape(self._dims))
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
h, c = state
return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1).
The input, output and state of this cell is a tuple of two tensors.
"""
@property
def output_size(self):
return (5, 5)
@property
def state_size(self):
return (6, 6)
def __call__(self, input_, state, scope=None):
h, c = state
x, y = input_
return ((x + 1, y + 1), (h + 1, c + 1))
class TestStateSaver(object):
def __init__(self, batch_size, state_size):
self._batch_size = batch_size
self._state_size = state_size
self.saved_state = {}
def state(self, name):
if isinstance(self._state_size, dict):
state_size = self._state_size[name]
else:
state_size = self._state_size
if isinstance(state_size, int):
state_size = (state_size,)
elif isinstance(state_size, tuple):
pass
else:
raise TypeError("state_size should either be an int or a tuple")
return tf.zeros((self._batch_size,) + state_size)
def save_state(self, name, state):
self.saved_state[name] = state
return tf.identity(state)
class RNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
inputs = [tf.placeholder(tf.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
tf.nn.rnn(cell, inputs, dtype=tf.float32, sequence_length=4)
with self.assertRaisesRegexp(ValueError, "must be a vector"):
tf.nn.dynamic_rnn(
cell, tf.pack(inputs), dtype=tf.float32, sequence_length=[[4]])
def testRNN(self):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state],
feed_dict={inputs[0]: input_value})
# Outputs
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
# Final state
self.assertAllClose(
values[-1],
max_length * np.ones((batch_size, input_size), dtype=np.float32))
def testDropout(self):
cell = Plus1RNNCell()
full_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
cell, input_keep_prob=1e-12, seed=0)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("drop_scope"):
dropped_outputs, _ = tf.nn.rnn(
full_dropout_cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state],
feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(dropped_outputs,
feed_dict={inputs[0]: input_value})
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros)
self.assertAllClose(d_v, np.ones_like(input_value))
def _testDynamicCalculation(self, use_gpu):
cell = Plus1RNNCell()
sequence_length = tf.placeholder(tf.int64)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("drop_scope"):
dynamic_outputs, dynamic_state = tf.nn.rnn(
cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))
with self.test_session(use_gpu=use_gpu) as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(dynamic_outputs,
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
dynamic_state_value = sess.run([dynamic_state],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# outputs are fully calculated for t = 0, 1
for v in dynamic_values[:2]:
self.assertAllClose(v, input_value + 1.0)
# outputs at t = 2 are zero for entry 0, calculated for entry 1
self.assertAllClose(
dynamic_values[2],
np.vstack((
np.zeros((input_size)),
1.0 + input_value[1, :])))
# outputs at t = 3+ are zero
for v in dynamic_values[3:]:
self.assertAllEqual(v, np.zeros_like(input_value))
# the final states are:
# entry 0: the values from the calculation at t=1
# entry 1: the values from the calculation at t=2
self.assertAllEqual(
dynamic_state_value[0],
np.vstack((
1.0 * (1 + 1) * np.ones((input_size)),
1.0 * (2 + 1) * np.ones((input_size)))))
def testDynamicCalculation(self):
self._testDynamicCalculation(True)
self._testDynamicCalculation(False)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# check that all the variables names starts
# with the proper scope.
tf.initialize_all_variables()
all_vars = tf.all_variables()
prefix = prefix or "RNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("RNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testScope(self):
def factory(scope):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
return tf.nn.rnn(cell, inputs, dtype=tf.float32, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class GRUTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testDynamic(self, use_gpu):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
concat_inputs = tf.placeholder(
tf.float32, shape=(time_steps, batch_size, input_size))
cell = tf.nn.rnn_cell.GRUCell(num_units=num_units)
with tf.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs=concat_inputs, sequence_length=sequence_length,
time_major=True, dtype=tf.float32)
feeds = {concat_inputs: input_values}
# Initialize
tf.initialize_all_variables().run(feed_dict=feeds)
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
def testDynamic(self):
self._testDynamic(use_gpu=False)
self._testDynamic(use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
tf.initialize_all_variables()
# check that all the variables names starts
# with the proper scope.
all_vars = tf.all_variables()
prefix = prefix or "RNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("RNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testDynamicScope(self):
time_steps = 8
num_units = 3
input_size = 5
batch_size = 2
sequence_length = np.random.randint(0, time_steps, size=batch_size)
def factory(scope):
concat_inputs = tf.placeholder(
tf.float32, shape=(time_steps, batch_size, input_size))
cell = tf.nn.rnn_cell.GRUCell(num_units=num_units)
return tf.nn.dynamic_rnn(cell, inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True, dtype=tf.float32,
scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testNoProjNoSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
cell = tf.nn.rnn_cell.LSTMCell(num_units, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testCellClipping(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True, cell_clip=0.0, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
for value in values:
# if cell c is clipped to 0, tanh(c) = 0 => m==0
self.assertAllEqual(value, np.zeros((batch_size, num_units)))
def _testNoProjNoShardingSimpleStateSaver(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=False, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name="save_lstm")
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
(last_state_value, saved_state_value) = sess.run(
[state, state_saver.saved_state["save_lstm"]],
feed_dict={inputs[0]: input_value})
self.assertAllEqual(last_state_value, saved_state_value)
def testNoProjNoShardingTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, num_units)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=False, initializer=initializer,
state_is_tuple=True)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=("c", "m"))
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
last_and_saved_states = sess.run(
state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
feed_dict={inputs[0]: input_value})
self.assertEqual(4, len(last_and_saved_states))
self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
def testNoProjNoShardingNestedTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, {"c0": num_units,
"m0": num_units,
"c1": num_units + 1,
"m1": num_units + 1,
"c2": num_units + 2,
"m2": num_units + 2,
"c3": num_units + 3,
"m3": num_units + 3})
def _cell(i):
return tf.nn.rnn_cell.LSTMCell(
num_units + i, use_peepholes=False, initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = tf.nn.rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
state_names = (("c0", "m0"), ("c1", "m1"),
("c2", "m2"), ("c3", "m3"))
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=state_names)
self.assertEqual(len(outputs), len(inputs))
# Final output comes from _cell(3) which has state size num_units + 3
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
last_states = sess.run(
list(nest.flatten(state)), feed_dict={inputs[0]: input_value})
saved_states = sess.run(
list(state_saver.saved_state.values()),
feed_dict={inputs[0]: input_value})
self.assertEqual(8, len(last_states))
self.assertEqual(8, len(saved_states))
flat_state_names = nest.flatten(state_names)
named_saved_states = dict(
zip(state_saver.saved_state.keys(), saved_states))
for i in range(8):
self.assertAllEqual(
last_states[i],
named_saved_states[flat_state_names[i]])
def _testProjNoSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer,
state_is_tuple=False)
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def testStateTupleWithProjAndSequenceLength(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell_notuple = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer, state_is_tuple=False)
cell_tuple = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer, state_is_tuple=True)
outputs_notuple, state_notuple = tf.nn.rnn(
cell_notuple, inputs, dtype=tf.float32,
sequence_length=sequence_length)
tf.get_variable_scope().reuse_variables()
outputs_tuple, state_tuple = tf.nn.rnn(
cell_tuple, inputs, dtype=tf.float32,
sequence_length=sequence_length)
self.assertEqual(len(outputs_notuple), len(inputs))
self.assertEqual(len(outputs_tuple), len(inputs))
self.assertTrue(isinstance(state_tuple, tuple))
self.assertTrue(isinstance(state_notuple, tf.Tensor))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
outputs_notuple_v = sess.run(
outputs_notuple, feed_dict={inputs[0]: input_value})
outputs_tuple_v = sess.run(
outputs_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
(state_notuple_v,) = sess.run(
(state_notuple,), feed_dict={inputs[0]: input_value})
state_tuple_v = sess.run(
state_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
def _testProjSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testTooManyShards(self, use_gpu):
num_units = 3
input_size = 5
num_proj = 4
num_proj_shards = 4
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
with self.assertRaises(ValueError):
tf.nn.rnn(cell, inputs, dtype=tf.float32)
def _testDoubleInput(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float64, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = tf.nn.rnn(
cell, inputs, initial_state=cell.zero_state(batch_size, tf.float64))
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run()
input_value = np.asarray(np.random.randn(batch_size, input_size),
dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
self.assertEqual(values[0].dtype, input_value.dtype)
def _testShardNoShardEquivalentOutput(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
initializer = tf.constant_initializer(0.001)
cell_noshard = tf.nn.rnn_cell.LSTMCell(
num_units,
num_proj=num_proj,
use_peepholes=True,
initializer=initializer,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
state_is_tuple=False)
cell_shard = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
initializer=initializer, num_proj=num_proj,
state_is_tuple=False)
with tf.variable_scope("noshard_scope"):
outputs_noshard, state_noshard = tf.nn.rnn(
cell_noshard, inputs, dtype=tf.float32)
with tf.variable_scope("shard_scope"):
outputs_shard, state_shard = tf.nn.rnn(
cell_shard, inputs, dtype=tf.float32)
self.assertEqual(len(outputs_noshard), len(inputs))
self.assertEqual(len(outputs_noshard), len(outputs_shard))
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
feeds = dict((x, input_value) for x in inputs)
values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
values_shard = sess.run(outputs_shard, feed_dict=feeds)
state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
state_values_shard = sess.run([state_shard], feed_dict=feeds)
self.assertEqual(len(values_noshard), len(values_shard))
self.assertEqual(len(state_values_noshard), len(state_values_shard))
for (v_noshard, v_shard) in zip(values_noshard, values_shard):
self.assertAllClose(v_noshard, v_shard, atol=1e-3)
for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
self.assertAllClose(s_noshard, s_shard, atol=1e-3)
def _testDoubleInputWithDropoutAndDynamicCalculation(
self, use_gpu):
"""Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
sequence_length = tf.placeholder(tf.int64)
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float64, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
dropout_cell = tf.nn.rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
outputs, state = tf.nn.rnn(
dropout_cell, inputs, sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, tf.float64))
self.assertEqual(len(outputs), len(inputs))
tf.initialize_all_variables().run(feed_dict={sequence_length: [2, 3]})
input_value = np.asarray(np.random.randn(batch_size, input_size),
dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
state_value = sess.run([state], feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
self.assertEqual(values[0].dtype, input_value.dtype)
self.assertEqual(state_value[0].dtype, input_value.dtype)
def testSharingWeightsWithReuse(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
initializer_d = tf.random_uniform_initializer(-1, 1, seed=self._seed+1)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer,
state_is_tuple=False)
cell_d = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer_d,
state_is_tuple=False)
with tf.variable_scope("share_scope"):
outputs0, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("share_scope", reuse=True):
outputs1, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("diff_scope"):
outputs2, _ = tf.nn.rnn(cell_d, inputs, dtype=tf.float32)
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1 + outputs2, feed_dict={inputs[0]: input_value})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:2*max_length]
outputs2_values = output_values[2*max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
self.assertEqual(len(outputs0_values), len(outputs2_values))
for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
# Same weights used by both RNNs so outputs should be the same.
self.assertAllEqual(o1, o2)
# Different weights used so outputs should be different.
self.assertTrue(np.linalg.norm(o1-o3) > 1e-6)
def testSharingWeightsWithDifferentNamescope(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer,
state_is_tuple=False)
with tf.name_scope("scope0"):
with tf.variable_scope("share_scope"):
outputs0, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.name_scope("scope1"):
with tf.variable_scope("share_scope", reuse=True):
outputs1, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(
outputs0 + outputs1, feed_dict={inputs[0]: input_value})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
for out0, out1 in zip(outputs0_values, outputs1_values):
self.assertAllEqual(out0, out1)
def testDynamicRNNWithTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
inputs_c = tf.pack(inputs)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
num_proj=num_proj, initializer=initializer, state_is_tuple=True)
outputs_static, state_static = tf.nn.rnn(
cell, inputs, dtype=tf.float32,
sequence_length=sequence_length)
tf.get_variable_scope().reuse_variables()
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs_c, dtype=tf.float32, time_major=True,
sequence_length=sequence_length)
self.assertTrue(isinstance(state_static, tf.nn.rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(state_dynamic, tf.nn.rnn_cell.LSTMStateTuple))
self.assertEqual(state_static[0], state_static.c)
self.assertEqual(state_static[1], state_static.h)
self.assertEqual(state_dynamic[0], state_dynamic.c)
self.assertEqual(state_dynamic[1], state_dynamic.h)
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
outputs_static_v = sess.run(
outputs_static, feed_dict={inputs[0]: input_value})
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
state_static_v = sess.run(
state_static, feed_dict={inputs[0]: input_value})
state_dynamic_v = sess.run(
state_dynamic, feed_dict={inputs[0]: input_value})
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_dynamic_v))
def testDynamicRNNWithNestedTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=tf.Graph()) as sess:
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None, input_size))]
inputs_c = tf.pack(inputs)
def _cell(i):
return tf.nn.rnn_cell.LSTMCell(
num_units + i, use_peepholes=True,
num_proj=num_proj + i, initializer=initializer, state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = tf.nn.rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
test_zero = cell.zero_state(1, tf.float32)
self.assertEqual(len(test_zero), 4)
for i in range(4):
self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
outputs_static, state_static = tf.nn.rnn(
cell, inputs, dtype=tf.float32,
sequence_length=sequence_length)
tf.get_variable_scope().reuse_variables()
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs_c, dtype=tf.float32, time_major=True,
sequence_length=sequence_length)
tf.initialize_all_variables().run()
input_value = np.random.randn(batch_size, input_size)
outputs_static_v = sess.run(
outputs_static, feed_dict={inputs[0]: input_value})
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
state_static_v = sess.run(
nest.flatten(state_static), feed_dict={inputs[0]: input_value})
state_dynamic_v = sess.run(
nest.flatten(state_dynamic), feed_dict={inputs[0]: input_value})
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_dynamic_v))
def _testDynamicEquivalentToStaticRNN(self, use_gpu, use_sequence_length):
time_steps = 8
num_units = 3
num_proj = 4
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
if use_sequence_length:
sequence_length = np.random.randint(0, time_steps, size=batch_size)
else:
sequence_length = None
########### Step 1: Run static graph and generate readouts
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
concat_inputs = tf.placeholder(tf.float32,
shape=(time_steps, batch_size, input_size))
inputs = tf.unpack(concat_inputs)
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
initializer=initializer, num_proj=num_proj, state_is_tuple=False)
with tf.variable_scope("dynamic_scope"):
outputs_static, state_static = tf.nn.rnn(
cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
feeds = {concat_inputs: input_values}
# Initialize
tf.initialize_all_variables().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
static_gradients = tf.gradients(
outputs_static + [state_static], [concat_inputs])
# Generate gradients of individual outputs w.r.t. inputs
static_individual_gradients = nest.flatten([
tf.gradients(y, [concat_inputs])
for y in [outputs_static[0],
outputs_static[-1],
state_static]])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, (
"Count of trainable variables: %d" % len(trainable_variables))
# pylint: disable=bad-builtin
static_individual_variable_gradients = nest.flatten([
tf.gradients(y, trainable_variables)
for y in [outputs_static[0],
outputs_static[-1],
state_static]])
# Test forward pass
values_static = sess.run(outputs_static, feed_dict=feeds)
(state_value_static,) = sess.run((state_static,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
static_grad_values = sess.run(static_gradients, feed_dict=feeds)
static_individual_grad_values = sess.run(
static_individual_gradients, feed_dict=feeds)
static_individual_var_grad_values = sess.run(
static_individual_variable_gradients, feed_dict=feeds)
########## Step 2: Run dynamic graph and generate readouts
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
concat_inputs = tf.placeholder(tf.float32,
shape=(time_steps, batch_size, input_size))
inputs = tf.unpack(concat_inputs)
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=True,
initializer=initializer, num_proj=num_proj, state_is_tuple=False)
with tf.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs=concat_inputs, sequence_length=sequence_length,
time_major=True, dtype=tf.float32)
split_outputs_dynamic = tf.unpack(outputs_dynamic, time_steps)
feeds = {concat_inputs: input_values}
# Initialize
tf.initialize_all_variables().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
dynamic_gradients = tf.gradients(
split_outputs_dynamic + [state_dynamic], [concat_inputs])
# Generate gradients of several individual outputs w.r.t. inputs
dynamic_individual_gradients = nest.flatten([
tf.gradients(y, [concat_inputs])
for y in [split_outputs_dynamic[0],
split_outputs_dynamic[-1],
state_dynamic]])
# Generate gradients of individual outputs w.r.t. trainable variables
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, (
"Count of trainable variables: %d" % len(trainable_variables))
dynamic_individual_variable_gradients = nest.flatten([
tf.gradients(y, trainable_variables)
for y in [split_outputs_dynamic[0],
split_outputs_dynamic[-1],
state_dynamic]])
# Test forward pass
values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
(state_value_dynamic,) = sess.run(
(state_dynamic,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
dynamic_individual_grad_values = sess.run(
dynamic_individual_gradients, feed_dict=feeds)
dynamic_individual_var_grad_values = sess.run(
dynamic_individual_variable_gradients, feed_dict=feeds)
######### Step 3: Comparisons
self.assertEqual(len(values_static), len(values_dynamic))
for (value_static, value_dynamic) in zip(values_static, values_dynamic):
self.assertAllEqual(value_static, value_dynamic)
self.assertAllEqual(state_value_static, state_value_dynamic)
self.assertAllEqual(static_grad_values, dynamic_grad_values)
self.assertEqual(len(static_individual_grad_values),
len(dynamic_individual_grad_values))
self.assertEqual(len(static_individual_var_grad_values),
len(dynamic_individual_var_grad_values))
for i, (a, b) in enumerate(zip(static_individual_grad_values,
dynamic_individual_grad_values)):
tf.logging.info("Comparing individual gradients iteration %d" % i)
self.assertAllEqual(a, b)
for i, (a, b) in enumerate(zip(static_individual_var_grad_values,
dynamic_individual_var_grad_values)):
tf.logging.info(
"Comparing individual variable gradients iteration %d" % i)
self.assertAllEqual(a, b)
def testNoProjNoShardingSimpleStateSaver(self):
self._testNoProjNoShardingSimpleStateSaver(use_gpu=False)
self._testNoProjNoShardingSimpleStateSaver(use_gpu=True)
def testNoProjNoSharding(self):
self._testNoProjNoSharding(use_gpu=False)
self._testNoProjNoSharding(use_gpu=True)
def testCellClipping(self):
self._testCellClipping(use_gpu=False)
self._testCellClipping(use_gpu=True)
def testProjNoSharding(self):
self._testProjNoSharding(use_gpu=False)
self._testProjNoSharding(use_gpu=True)
def testProjSharding(self):
self._testProjSharding(use_gpu=False)
self._testProjSharding(use_gpu=True)
def testTooManyShards(self):
self._testTooManyShards(use_gpu=False)
self._testTooManyShards(use_gpu=True)
def testShardNoShardEquivalentOutput(self):
self._testShardNoShardEquivalentOutput(use_gpu=False)
self._testShardNoShardEquivalentOutput(use_gpu=True)
def testDoubleInput(self):
self._testDoubleInput(use_gpu=False)
self._testDoubleInput(use_gpu=True)
def testDoubleInputWithDropoutAndDynamicCalculation(self):
self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=False)
self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=True)
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(
use_gpu=False, use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(
use_gpu=True, use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(
use_gpu=False, use_sequence_length=True)
self._testDynamicEquivalentToStaticRNN(
use_gpu=True, use_sequence_length=True)
class BidirectionalRNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _createBidirectionalRNN(self,
use_gpu,
use_shape,
use_sequence_length,
scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
sequence_length = tf.placeholder(tf.int64) if use_sequence_length else None
cell_fw = tf.nn.rnn_cell.LSTMCell(num_units,
input_size,
initializer=initializer,
state_is_tuple=False)
cell_bw = tf.nn.rnn_cell.LSTMCell(num_units,
input_size,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(
tf.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
outputs, state_fw, state_bw = tf.nn.bidirectional_rnn(
cell_fw,
cell_bw,
inputs,
dtype=tf.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(
out.get_shape().as_list(),
[batch_size if use_shape else None, 2 * num_units])
input_value = np.random.randn(batch_size, input_size)
outputs = tf.pack(outputs)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalRNN(self, use_gpu, use_shape):
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalRNN(use_gpu, use_shape, True))
tf.initialize_all_variables().run()
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward outputs have to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 3 <= depth < 6
#
# First sequence in batch is length=2
# Check that the time=0 forward output is equal to time=1 backward output
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is equal to time=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is equal to time=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is equal to time=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is equal to time=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
def _testBidirectionalRNNWithoutSequenceLength(self, use_gpu, use_shape):
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_gpu, use_shape, False))
tf.initialize_all_variables().run()
out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward outputs have to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 3 <= depth < 6
#
# Both sequences in batch are length=8. Check that the time=i
# forward output is equal to time=8-1-i backward output
for i in xrange(8):
self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
for i in xrange(8):
self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
def testBidirectionalRNN(self):
self._testBidirectionalRNN(use_gpu=False, use_shape=False)
self._testBidirectionalRNN(use_gpu=True, use_shape=False)
self._testBidirectionalRNN(use_gpu=False, use_shape=True)
self._testBidirectionalRNN(use_gpu=True, use_shape=True)
def testBidirectionalRNNWithoutSequenceLength(self):
self._testBidirectionalRNNWithoutSequenceLength(use_gpu=False,
use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(use_gpu=True,
use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(use_gpu=False,
use_shape=True)
self._testBidirectionalRNNWithoutSequenceLength(use_gpu=True,
use_shape=True)
def _createBidirectionalDynamicRNN(self, use_gpu, use_shape,
use_state_tuple, use_time_major,
scope=None):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
sequence_length = tf.placeholder(tf.int64)
cell_fw = tf.nn.rnn_cell.LSTMCell(num_units,
initializer=initializer,
state_is_tuple=use_state_tuple)
cell_bw = tf.nn.rnn_cell.LSTMCell(num_units,
initializer=initializer,
state_is_tuple=use_state_tuple)
inputs = max_length * [
tf.placeholder(tf.float32,
shape=(batch_size if use_shape else None, input_size))]
inputs_c = tf.pack(inputs)
if not use_time_major:
inputs_c = tf.transpose(inputs_c, [1, 0, 2])
outputs, states = tf.nn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs_c,
sequence_length,
dtype=tf.float32,
time_major=use_time_major,
scope=scope)
outputs = tf.concat(2, outputs)
state_fw, state_bw = states
outputs_shape = [None, max_length, 2 * num_units]
if use_shape:
outputs_shape[0] = batch_size
if use_time_major:
outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
self.assertEqual(
outputs.get_shape().as_list(),
outputs_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalDynamicRNN(self, use_gpu, use_shape,
use_state_tuple, use_time_major):
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(
use_gpu, use_shape, use_state_tuple, use_time_major))
tf.initialize_all_variables().run()
# Run with pre-specified sequence length of 2, 3
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
[outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
s_fw = (c_fw, m_fw)
s_bw = (c_bw, m_bw)
else:
out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward outputs have to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 3 <= depth < 6
#
# First sequence in batch is length=2
# Check that the time=0 forward output is equal to time=1 backward output
if not use_time_major:
out = np.swapaxes(out, 0, 1)
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is equal to time=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is equal to time=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is equal to time=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is equal to time=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should be
# exactly the same
self.assertAllClose(s_fw, s_bw)
def testBidirectionalDynamicRNN(self):
# Generate 2^4 option values
# from [True, True, True, True] to [False, False, False, False]
options = itertools.product([True, False], repeat=4)
for option in options:
self._testBidirectionalDynamicRNN(use_gpu=option[0], use_shape=option[1],
use_state_tuple=option[2],
use_time_major=option[3])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope
# argument, which can be None, a string,
# or a VariableScope instance.
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
# Check that all the variable names start
# with the proper scope.
tf.initialize_all_variables()
all_vars = tf.all_variables()
prefix = prefix or "BiRNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("BiRNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testBidirectionalRNNScope(self):
def factory(scope):
return self._createBidirectionalRNN(
use_gpu=True, use_shape=True,
use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
def testBidirectionalDynamicRNNScope(self):
def get_factory(use_time_major):
def factory(scope):
return self._createBidirectionalDynamicRNN(
use_gpu=True, use_shape=True, use_state_tuple=True,
use_time_major=use_time_major, scope=scope)
return factory
self._testScope(get_factory(True), use_outer_scope=True)
self._testScope(get_factory(True), use_outer_scope=False)
self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
self._testScope(get_factory(False), use_outer_scope=True)
self._testScope(get_factory(False), use_outer_scope=False)
self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testMultiDimensionalLSTMAllRNNContainers(self):
feature_dims = (3, 4, 5)
input_size = feature_dims
batch_size = 2
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=tf.Graph()) as sess:
inputs = max_length * [
tf.placeholder(tf.float32, shape=(None,) + input_size)]
inputs_using_dim = max_length * [
tf.placeholder(tf.float32, shape=(batch_size,) + input_size)]
inputs_c = tf.pack(inputs)
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = DummyMultiDimensionalLSTM(feature_dims)
state_saver = TestStateSaver(batch_size, input_size)
outputs_static, state_static = tf.nn.rnn(
cell, inputs, dtype=tf.float32,
sequence_length=sequence_length)
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs_c, dtype=tf.float32, time_major=True,
sequence_length=sequence_length)
outputs_bid, state_bid_fw, state_bid_bw = tf.nn.bidirectional_rnn(
cell, cell, inputs_using_dim, dtype=tf.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = tf.nn.state_saving_rnn(
cell, inputs_using_dim, sequence_length=sequence_length,
state_saver=state_saver, state_name=("h", "c"))
for out, inp in zip(outputs_static, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(outputs_dynamic.get_shape().as_list(),
inputs_c.get_shape().as_list())
for out, inp in zip(outputs_bid, inputs_using_dim):
input_shape_list = inp.get_shape().as_list()
# fwd and bwd activations are concatenated along the second dim.
input_shape_list[1] *= 2
self.assertEqual(out.get_shape().as_list(), input_shape_list)
tf.initialize_all_variables().run()
input_total_size = (batch_size,) + input_size
input_value = np.random.randn(*input_total_size)
outputs_static_v = sess.run(
outputs_static, feed_dict={inputs[0]: input_value})
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={inputs[0]: input_value})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={inputs_using_dim[0]: input_value})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={inputs_using_dim[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=2)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_static_v = sess.run(
state_static, feed_dict={inputs[0]: input_value})
state_dynamic_v = sess.run(
state_dynamic, feed_dict={inputs[0]: input_value})
state_bid_fw_v = sess.run(
state_bid_fw, feed_dict={inputs_using_dim[0]: input_value})
state_bid_bw_v = sess.run(
state_bid_bw, feed_dict={inputs_using_dim[0]: input_value})
state_sav_v = sess.run(
state_sav, feed_dict={inputs_using_dim[0]: input_value})
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class NestedLSTMTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testNestedIOLSTMAllRNNContainers(self):
input_size = 5
batch_size = 2
state_size = 6
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=tf.Graph()) as sess:
state_saver = TestStateSaver(batch_size, state_size)
single_input = (tf.placeholder(tf.float32, shape=(None, input_size)),
tf.placeholder(tf.float32, shape=(None, input_size)))
inputs = max_length * [single_input]
inputs_c = (tf.pack([input_[0] for input_ in inputs]),
tf.pack([input_[1] for input_ in inputs]))
single_input_using_dim = (
tf.placeholder(tf.float32, shape=(batch_size, input_size)),
tf.placeholder(tf.float32, shape=(batch_size, input_size)))
inputs_using_dim = max_length * [single_input_using_dim]
# Create a cell for the whole test. This is fine because the cell has no
# variables.
cell = NestedRNNCell()
outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
cell, inputs_c, dtype=tf.float32, time_major=True,
sequence_length=sequence_length)
outputs_static, state_static = tf.nn.rnn(
cell, inputs, dtype=tf.float32,
sequence_length=sequence_length)
outputs_bid, state_bid_fw, state_bid_bw = tf.nn.bidirectional_rnn(
cell, cell, inputs_using_dim, dtype=tf.float32,
sequence_length=sequence_length)
outputs_sav, state_sav = tf.nn.state_saving_rnn(
cell, inputs_using_dim, sequence_length=sequence_length,
state_saver=state_saver, state_name=("h", "c"))
def _assert_same_shape(input1, input2, double=False):
flat_input1 = nest.flatten(input1)
flat_input2 = nest.flatten(input2)
for inp1, inp2 in zip(flat_input1, flat_input2):
input_shape = inp1.get_shape().as_list()
if double:
input_shape[1] *= 2
self.assertEqual(input_shape, inp2.get_shape().as_list())
_assert_same_shape(inputs_c, outputs_dynamic)
_assert_same_shape(inputs, outputs_static)
_assert_same_shape(inputs_using_dim, outputs_sav)
_assert_same_shape(inputs_using_dim, outputs_bid, double=True)
tf.initialize_all_variables().run()
input_total_size = (batch_size, input_size)
input_value = (np.random.randn(*input_total_size),
np.random.randn(*input_total_size))
outputs_dynamic_v = sess.run(
outputs_dynamic, feed_dict={single_input: input_value})
outputs_static_v = sess.run(
outputs_static, feed_dict={single_input: input_value})
outputs_sav_v = sess.run(
outputs_sav, feed_dict={single_input_using_dim: input_value})
outputs_bid_v = sess.run(
outputs_bid, feed_dict={single_input_using_dim: input_value})
self.assertAllEqual(outputs_static_v,
np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
self.assertAllEqual(outputs_static_v, outputs_sav_v)
outputs_static_array = np.array(outputs_static_v)
outputs_static_array_double = np.concatenate(
(outputs_static_array, outputs_static_array), axis=3)
outputs_bid_array = np.array(outputs_bid_v)
self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
state_dynamic_v = sess.run(
state_dynamic, feed_dict={single_input: input_value})
state_static_v = sess.run(
state_static, feed_dict={single_input: input_value})
state_bid_fw_v = sess.run(
state_bid_fw, feed_dict={single_input_using_dim: input_value})
state_bid_bw_v = sess.run(
state_bid_bw, feed_dict={single_input_using_dim: input_value})
state_sav_v = sess.run(
state_sav, feed_dict={single_input_using_dim: input_value})
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_dynamic_v))
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_sav_v))
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_bid_fw_v))
self.assertAllEqual(
np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class RawRNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testRawRNN(self, max_time):
with self.test_session(graph=tf.Graph()) as sess:
batch_size = 16
input_depth = 4
num_units = 3
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
inputs_ta = inputs_ta.unpack(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, tf.float32)
else:
next_state = cell_state # copy state through
elements_finished = (time_ >= sequence_length)
finished = tf.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
outputs_ta, final_state, _ = tf.nn.raw_rnn(cell, loop_fn)
outputs = outputs_ta.pack()
tf.get_variable_scope().reuse_variables()
outputs_dynamic_rnn, final_state_dynamic_rnn = tf.nn.dynamic_rnn(
cell, inputs, time_major=True, dtype=tf.float32,
sequence_length=sequence_length)
variables = tf.trainable_variables()
gradients = tf.gradients([outputs, final_state], [inputs] + variables)
gradients_dynamic_rnn = tf.gradients(
[outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
tf.initialize_all_variables().run()
rand_input = np.random.randn(max_time, batch_size, input_depth)
if max_time == 0:
rand_seq_len = np.zeros(batch_size)
else:
rand_seq_len = np.random.randint(max_time, size=batch_size)
# To ensure same output lengths for dynamic_rnn and raw_rnn
rand_seq_len[0] = max_time
(outputs_val, outputs_dynamic_rnn_val,
final_state_val, final_state_dynamic_rnn_val) = sess.run(
[outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
# NOTE: With 0 time steps, raw_rnn has no shape information about the
# input, so gradient comparisons are impossible (the gradients eval
# would fail). This case therefore skips the gradients test.
if max_time > 0:
self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
gradients_val = sess.run(
gradients,
feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
gradients_dynamic_rnn_val = sess.run(
gradients_dynamic_rnn,
feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
input_gradients_val = gradients_val[0]
input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
self.assertAllClose(
input_gradients_val, input_gradients_dynamic_rnn_val)
for i in range(1, len(gradients_val)):
self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])
def testRawRNNZeroLength(self):
# NOTE: With 0 time steps, raw_rnn has no shape information about the
# input, so gradient comparisons are impossible (the gradients eval
# would fail). This case therefore skips the gradients test.
self._testRawRNN(max_time=0)
def testRawRNN(self):
self._testRawRNN(max_time=10)
def testLoopState(self):
with self.test_session(graph=tf.Graph()):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
inputs_ta = inputs_ta.unpack(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = tf.constant([0])
next_state = cell.zero_state(batch_size, tf.float32)
else:
loop_state = tf.pack([tf.squeeze(loop_state) + 1])
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = tf.tile([time_ >= max_time], [batch_size])
finished = tf.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input,
next_state, emit_output, loop_state)
r = tf.nn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
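# The loop_state counter starts at [0] and is incremented by one on each
# loop_fn call after the initialization call, so 10 time steps yield [10].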
self.assertEqual([10], loop_state.eval())
def testLoopStateWithTensorArray(self):
with self.test_session(graph=tf.Graph()):
max_time = 4
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
inputs_ta = inputs_ta.unpack(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = tf.TensorArray(
dynamic_size=True, size=0, dtype=tf.int32, clear_after_read=False)
loop_state = loop_state.write(0, 1)
next_state = cell.zero_state(batch_size, tf.float32)
else:
loop_state = loop_state.write(
time_, loop_state.read(time_ - 1) + time_)
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = tf.tile([time_ >= max_time], [batch_size])
finished = tf.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input,
next_state, emit_output, loop_state)
r = tf.nn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
loop_state = loop_state.pack()
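# Each step t >= 1 writes loop_state[t] = loop_state[t - 1] + t, starting
# from loop_state[0] = 1, hence the expected [1, 2, 4, 7, 11] below.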
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
def testEmitDifferentStructureThanCellOutput(self):
with self.test_session(graph=tf.Graph()) as sess:
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
inputs_ta = inputs_ta.unpack(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, _):
if cell_output is None:
emit_output = (tf.zeros([2, 3], dtype=tf.int32),
tf.zeros([1], dtype=tf.int64))
next_state = cell.zero_state(batch_size, tf.float32)
else:
emit_output = (tf.ones([batch_size, 2, 3], dtype=tf.int32),
tf.ones([batch_size, 1], dtype=tf.int64))
next_state = cell_state
elements_finished = tf.tile([time_ >= max_time], [batch_size])
finished = tf.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
r = tf.nn.raw_rnn(cell, loop_fn)
output_ta = r[0]
self.assertEqual(2, len(output_ta))
self.assertEqual([tf.int32, tf.int64], [ta.dtype for ta in output_ta])
output = [ta.pack() for ta in output_ta]
output_vals = sess.run(output)
self.assertAllEqual(
np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
self.assertAllEqual(
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
tf.initialize_all_variables()
# Check that all the variable names start
# with the proper scope.
all_vars = tf.all_variables()
prefix = prefix or "RNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("RNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testRawRNNScope(self):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
def factory(scope):
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
inputs_ta = inputs_ta.unpack(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, tf.float32)
else:
next_state = cell_state
elements_finished = (time_ >= sequence_length)
finished = tf.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
return tf.nn.raw_rnn(cell, loop_fn, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
class StateSaverRNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.test_session(use_gpu=True, graph=tf.Graph()):
if use_outer_scope:
with tf.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
tf.initialize_all_variables()
# Check that all the variable names start
# with the proper scope.
all_vars = tf.all_variables()
prefix = prefix or "RNN"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf.logging.info("RNN with scope: %s (%s)"
% (prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf.logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testStateSaverRNNScope(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
def factory(scope):
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
cell = tf.nn.rnn_cell.LSTMCell(
num_units, use_peepholes=False, initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
return tf.nn.state_saving_rnn(
cell, inputs, state_saver=state_saver,
state_name="save_lstm", scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = tf.nn.rnn_cell.LSTMCell(
num_units=input_size, use_peepholes=True, initializer=initializer,
state_is_tuple=False)
outputs, final_state = tf.nn.rnn(
cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients(outputs + [final_state], trainable_variables)
return tf.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = tf.nn.rnn_cell.LSTMCell(
num_units=input_size, use_peepholes=True, initializer=initializer,
state_is_tuple=False)
outputs, final_state = tf.nn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=tf.float32)
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients([outputs, final_state], trainable_variables)
return tf.group(final_state, outputs, *gradients)
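# Note: both benchmark builders above return a single tf.group op that depends
# on the forward outputs, the final state and the parameter gradients, so one
# Session.run of that op times a full forward+backward pass.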
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = tf.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with tf.Session(config=config, graph=tf.Graph()) as sess:
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _static_vs_dynamic_rnn_benchmark_static(
inputs_list_t, sequence_length)
def _create_dynamic_rnn():
with tf.Session(config=config, graph=tf.Graph()) as sess:
inputs_t = tf.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(
inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic/delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
# Warm up
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start)/float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = tf.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with tf.Session(config=config, graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _static_vs_dynamic_rnn_benchmark_static(
inputs_list_t, sequence_length)
tf.initialize_all_variables().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with tf.Session(config=config, graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else None):
inputs_t = tf.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(
inputs_t, sequence_length)
tf.initialize_all_variables().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static,
delta_dynamic, delta_dynamic/delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = tf.nn.rnn_cell.LSTMCell(
num_units=input_size, use_peepholes=True, initializer=initializer,
state_is_tuple=False)
outputs, final_state = tf.nn.rnn(
cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients(outputs + [final_state], trainable_variables)
return tf.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(
batch_size, max_time, num_units, use_gpu):
config = tf.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)]
# Halve the sequence length, full static unroll
with tf.Session(config=config, graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t, sequence_length / 2)
tf.initialize_all_variables().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with tf.Session(config=config, graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
tf.initialize_all_variables().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len/delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = tf.nn.rnn_cell.LSTMCell(
num_units=input_size, use_peepholes=True,
initializer=initializer, state_is_tuple=state_is_tuple)
outputs, final_state = tf.nn.rnn(
cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients(outputs + final_state, trainable_variables)
return tf.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(
batch_size, max_time, num_units, use_gpu):
config = tf.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)]
# Run with concatenated states (default)
with tf.Session(config=config, graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
tf.initialize_all_variables().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with tf.Session(config=config, graph=tf.Graph()) as sess:
with tf.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
tf.initialize_all_variables().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state/delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length,
swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = tf.nn.rnn_cell.LSTMCell(
num_units=input_size, use_peepholes=True, initializer=initializer,
state_is_tuple=False)
outputs, final_state = tf.nn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length,
swap_memory=swap_memory, dtype=tf.float32)
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients([outputs, final_state], trainable_variables)
return tf.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = tf.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with tf.Session(config=config, graph=tf.Graph()) as sess:
inputs_t = tf.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
tf.initialize_all_variables().run()
no_swap = _timer(sess, ops)
# Memory swap
with tf.Session(config=config, graph=tf.Graph()) as sess:
inputs_t = tf.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
tf.initialize_all_variables().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap/no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory):
config = tf.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(5):
if dynamic:
with tf.Session(config=config, graph=tf.Graph()) as sess:
inputs_t = tf.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
tf.initialize_all_variables().run()
elapsed = _timer(sess, ops)
else:
with tf.Session(config=config, graph=tf.Graph()) as sess:
inputs_list_t = [
tf.Variable(x, trainable=False).value() for x in inputs_list]
ops = _static_vs_dynamic_rnn_benchmark_static(
inputs_list_t, sequence_length)
tf.initialize_all_variables().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" %
(batch_size, seqlen, num_units, dynamic, elapsed,
elapsed/seqlen))
class BenchmarkRNN(tf.test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(name="graph_creation_time_static_T%02d" % max_time,
iters=5, wall_time=s_dt)
self.report_benchmark(name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5, wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(
batch_size, max_time, num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
% (max_time, batch_size, num_units, use_gpu),
iters=20, wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
% (max_time, batch_size, num_units, use_gpu),
iters=20, wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(
batch_size, max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d"
% (max_time, batch_size, num_units),
iters=20, wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d"
% (max_time, batch_size, num_units),
iters=20, wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(
batch_size, max_time, num_units, use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s"
% (max_time, batch_size, num_units, use_gpu),
iters=20, wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s"
% (max_time, batch_size, num_units, use_gpu),
iters=20, wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (16, 128,):
for max_time in (50,):
for num_units in (16, 128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(
batch_size, max_time, num_units, use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s"
% (max_time, batch_size, num_units, use_gpu),
iters=20, wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s"
% (max_time, batch_size, num_units, use_gpu),
iters=20, wall_time=t_dt)
if __name__ == "__main__":
tf.test.main()
| neilhan/tensorflow | tensorflow/python/kernel_tests/rnn_test.py | Python | apache-2.0 | 95,137 | 0.00987 |
"""
HSA driver bridge implementation
"""
from collections.abc import Sequence
import sys
import atexit
import os
import ctypes
import struct
import traceback
import weakref
import logging
from contextlib import contextmanager
from collections import defaultdict, deque
from functools import total_ordering
from numba import mviewbuf
from numba.core import utils, config
from .error import HsaSupportError, HsaDriverError, HsaApiError
from numba.roc.hsadrv import enums, enums_ext, drvapi
import numpy as np
_logger = logging.getLogger(__name__)
class HsaKernelTimedOut(HsaDriverError):
pass
def _device_type_to_string(device):
try:
return ['CPU', 'GPU', 'DSP'][device]
except IndexError:
return 'Unknown'
DEFAULT_HSA_DRIVER = '/opt/rocm/lib/libhsa-runtime64.so'
def _find_driver():
envpath = os.environ.get('NUMBA_HSA_DRIVER', DEFAULT_HSA_DRIVER)
if envpath == '0':
# Force fail
_raise_driver_not_found()
# Determine DLL type
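# struct.calcsize('P') is the size of a C pointer in bytes, so the check
# below effectively requires a 64-bit (8-byte pointer) *nix platform.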
if (struct.calcsize('P') != 8
or sys.platform == 'win32'
or sys.platform == 'darwin'):
_raise_platform_not_supported()
else:
# Assume to be *nix like and 64 bit
dlloader = ctypes.CDLL
dldir = ['/usr/lib', '/usr/lib64']
dlname = 'libhsa-runtime64.so'
if envpath is not None:
try:
envpath = os.path.abspath(envpath)
except ValueError:
raise HsaSupportError("NUMBA_HSA_DRIVER %s is not a valid path" %
envpath)
if not os.path.isfile(envpath):
raise HsaSupportError("NUMBA_HSA_DRIVER %s is not a valid file "
"path. Note it must be a filepath of the .so/"
".dll/.dylib or the driver" % envpath)
candidates = [envpath]
else:
# First search for the name in the default library path.
# If that is not found, try the specific path.
candidates = [dlname] + [os.path.join(x, dlname) for x in dldir]
# Load the driver; Collect driver error information
path_not_exist = []
driver_load_error = []
for path in candidates:
try:
dll = dlloader(path)
except OSError as e:
# Problem opening the DLL
path_not_exist.append(not os.path.isfile(path))
driver_load_error.append(e)
else:
return dll
# Problem loading driver
if all(path_not_exist):
_raise_driver_not_found()
else:
errmsg = '\n'.join(str(e) for e in driver_load_error)
_raise_driver_error(errmsg)
PLATFORM_NOT_SUPPORTED_ERROR = """
HSA is not currently supported on this platform ({0}).
"""
def _raise_platform_not_supported():
raise HsaSupportError(PLATFORM_NOT_SUPPORTED_ERROR.format(sys.platform))
DRIVER_NOT_FOUND_MSG = """
The HSA runtime library cannot be found.
If you are sure that the HSA runtime is installed, try setting the
environment variable NUMBA_HSA_DRIVER to the file path of the HSA runtime
shared library.
"""
def _raise_driver_not_found():
raise HsaSupportError(DRIVER_NOT_FOUND_MSG)
DRIVER_LOAD_ERROR_MSG = """
An HSA runtime library was found, but it failed to load with the error:
%s
"""
def _raise_driver_error(e):
raise HsaSupportError(DRIVER_LOAD_ERROR_MSG % e)
MISSING_FUNCTION_ERRMSG = """driver missing function: %s.
"""
class Recycler(object):
def __init__(self):
self._garbage = []
self.enabled = True
def free(self, obj):
self._garbage.append(obj)
self.service()
def _cleanup(self):
for obj in self._garbage:
obj._finalizer(obj)
del self._garbage[:]
def service(self):
if self.enabled:
if len(self._garbage) > 10:
self._cleanup()
def drain(self):
self._cleanup()
self.enabled = False
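# A minimal usage sketch of the Recycler (illustrative only, not part of the
# module API): objects carrying a `_finalizer` callable are handed to
# `free()`, which batches them and only runs the finalizers once more than
# ten objects have accumulated; `drain()` flushes the batch and disables
# further recycling, e.g.:
#
#     recycler = Recycler()
#     recycler.free(some_obj)   # some_obj._finalizer(some_obj) runs later
#     recycler.drain()          # flush everything at shutdown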
# The Driver ###########################################################
class Driver(object):
"""
Driver API functions are lazily bound.
"""
_singleton = None
_agent_map = None
_api_prototypes = drvapi.API_PROTOTYPES # avoid premature GC at exit
_hsa_properties = {
'version_major': (enums.HSA_SYSTEM_INFO_VERSION_MAJOR, ctypes.c_uint16),
'version_minor': (enums.HSA_SYSTEM_INFO_VERSION_MINOR, ctypes.c_uint16),
'timestamp': (enums.HSA_SYSTEM_INFO_TIMESTAMP, ctypes.c_uint64),
'timestamp_frequency': (enums.HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, ctypes.c_uint16),
'signal_max_wait': (enums.HSA_SYSTEM_INFO_SIGNAL_MAX_WAIT, ctypes.c_uint64),
}
def __new__(cls):
obj = cls._singleton
if obj is not None:
return obj
else:
obj = object.__new__(cls)
cls._singleton = obj
return obj
def __init__(self):
try:
if config.DISABLE_HSA:
raise HsaSupportError("HSA disabled by user")
self.lib = _find_driver()
self.is_initialized = False
self.initialization_error = None
except HsaSupportError as e:
self.is_initialized = True
self.initialization_error = e
self._agent_map = None
self._programs = {}
self._recycler = Recycler()
self._active_streams = weakref.WeakSet()
def _initialize_api(self):
if self.is_initialized:
return
self.is_initialized = True
try:
self.hsa_init()
except HsaApiError as e:
self.initialization_error = e
raise HsaDriverError("Error at driver init: \n%s:" % e)
else:
@atexit.register
def shutdown():
try:
for agent in self.agents:
agent.release()
except AttributeError:
# No agents were initialised, so
# self.agents is not present
pass
else:
self._recycler.drain()
def _initialize_agents(self):
if self._agent_map is not None:
return
self._initialize_api()
agent_ids = []
def on_agent(agent_id, ctxt):
agent_ids.append(agent_id)
return enums.HSA_STATUS_SUCCESS
callback = drvapi.HSA_ITER_AGENT_CALLBACK_FUNC(on_agent)
self.hsa_iterate_agents(callback, None)
agent_map = dict((agent_id, Agent(agent_id)) for agent_id in agent_ids)
self._agent_map = agent_map
@property
def is_available(self):
self._initialize_api()
return self.initialization_error is None
@property
def agents(self):
self._initialize_agents()
return self._agent_map.values()
def create_program(self, model=enums.HSA_MACHINE_MODEL_LARGE,
profile=enums.HSA_PROFILE_FULL,
rounding_mode=enums.HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT,
options=None):
program = drvapi.hsa_ext_program_t()
assert options is None
self.hsa_ext_program_create(model, profile, rounding_mode,
options, ctypes.byref(program))
return Program(program)
def create_signal(self, initial_value, consumers=None):
if consumers is None:
consumers = tuple(self.agents)
consumers_len = len(consumers)
consumers_type = drvapi.hsa_agent_t * consumers_len
consumers = consumers_type(*[c._id for c in consumers])
result = drvapi.hsa_signal_t()
self.hsa_signal_create(initial_value, consumers_len, consumers,
ctypes.byref(result))
return Signal(result.value)
def __getattr__(self, fname):
# Initialize driver
self._initialize_api()
# First, try to resolve it as an HSA property
try:
enum, typ = self._hsa_properties[fname]
result = typ()
self.hsa_system_get_info(enum, ctypes.byref(result))
return result.value
except KeyError:
pass
# If it is not a property, try to resolve it as an API call
try:
proto = self._api_prototypes[fname]
except KeyError:
raise AttributeError(fname)
if self.initialization_error is not None:
raise HsaSupportError("Error at driver init: \n%s:" %
self.initialization_error)
# Find function in driver library
libfn = self._find_api(fname)
for key, val in proto.items():
setattr(libfn, key, val)
def driver_wrapper(fn):
def wrapped(*args, **kwargs):
_logger.debug('call driver api: %s', fname)
return fn(*args, **kwargs)
return wrapped
retval = driver_wrapper(libfn)
setattr(self, fname, retval)
return retval
def _find_api(self, fname):
# Try regular
try:
return getattr(self.lib, fname)
except AttributeError:
pass
# Not found.
# Delay the missing-function error until the function is actually used.
def absent_function(*args, **kws):
raise HsaDriverError(MISSING_FUNCTION_ERRMSG % fname)
setattr(self, fname, absent_function)
return absent_function
@property
def components(self):
"""Returns a ordered list of components
The first device should be picked first
"""
return list(filter(lambda a: a.is_component, reversed(sorted(
self.agents))))
def create_stream(self):
st = Stream()
self._active_streams.add(st)
return st
def implicit_sync(self):
"""
Implicit synchronization for all asynchronous streams
across all devices.
"""
_logger.info("implicit sync")
for st in self._active_streams:
st.synchronize()
hsa = Driver()
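# Illustrative (hypothetical) uses of the lazily-bound singleton above:
# attribute access on `hsa` is resolved in Driver.__getattr__, so
#
#     hsa.version_major            # looked up via hsa_system_get_info
#     hsa.hsa_iterate_agents(...)  # bound from drvapi.API_PROTOTYPES on
#                                  # first use and cached on the instance
#
# Both forms trigger driver initialization on first access.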
class HsaWrapper(object):
def __getattr__(self, fname):
try:
enum, typ = self._hsa_properties[fname]
except KeyError:
raise AttributeError(
"%r object has no attribute %r" % (self.__class__, fname))
func = getattr(hsa, self._hsa_info_function)
result = typ()
is_array_type = hasattr(typ, '_length_')
# if the result is not a ctypes array, pass it by reference
result_buff = result if is_array_type else ctypes.byref(result)
func(self._id, enum, result_buff)
if not is_array_type or typ._type_ == ctypes.c_char:
return result.value
else:
return list(result)
def __dir__(self):
return sorted(set(dir(type(self)) +
list(self.__dict__.keys()) +
list(self._hsa_properties.keys())))
@total_ordering
class Agent(HsaWrapper):
"""Abstracts a HSA compute agent.
This will wrap and provide an OO interface for hsa_agent_t C-API elements
"""
# Note this will be handled in a rather unconventional way. When agents get
# initialized by the driver, a set of instances for all the available agents
# will be created. After that creation, the __new__ and __init__ methods will
# be replaced, and the constructor will act as a mapping from an agent_id to
    # the equivalent Agent object. Any attempt to create an Agent with a
    # non-existent agent_id will result in an error.
#
# the logic for this resides in Driver._initialize_agents
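    #
    # A rough sketch of that mapping (hypothetical; the real logic lives in
    # Driver._initialize_agents):
    #
    #     agent_map = {agent_id: Agent(agent_id) for agent_id in agent_ids}
    #     Agent.__new__ = staticmethod(lambda cls, agent_id: agent_map[agent_id])
    #     Agent.__init__ = lambda self, agent_id: None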
_hsa_info_function = 'hsa_agent_get_info'
_hsa_properties = {
'name': (enums.HSA_AGENT_INFO_NAME, ctypes.c_char * 64),
'vendor_name': (enums.HSA_AGENT_INFO_VENDOR_NAME, ctypes.c_char * 64),
'feature': (enums.HSA_AGENT_INFO_FEATURE, drvapi.hsa_agent_feature_t),
'wavefront_size': (
enums.HSA_AGENT_INFO_WAVEFRONT_SIZE, ctypes.c_uint32),
'workgroup_max_dim': (
enums.HSA_AGENT_INFO_WORKGROUP_MAX_DIM, ctypes.c_uint16 * 3),
'grid_max_dim': (enums.HSA_AGENT_INFO_GRID_MAX_DIM, drvapi.hsa_dim3_t),
'grid_max_size': (enums.HSA_AGENT_INFO_GRID_MAX_SIZE, ctypes.c_uint32),
'fbarrier_max_size': (
enums.HSA_AGENT_INFO_FBARRIER_MAX_SIZE, ctypes.c_uint32),
'queues_max': (enums.HSA_AGENT_INFO_QUEUES_MAX, ctypes.c_uint32),
'queue_max_size': (
enums.HSA_AGENT_INFO_QUEUE_MAX_SIZE, ctypes.c_uint32),
'queue_type': (
enums.HSA_AGENT_INFO_QUEUE_TYPE, drvapi.hsa_queue_type_t),
'node': (enums.HSA_AGENT_INFO_NODE, ctypes.c_uint32),
'_device': (enums.HSA_AGENT_INFO_DEVICE, drvapi.hsa_device_type_t),
'cache_size': (enums.HSA_AGENT_INFO_CACHE_SIZE, ctypes.c_uint32 * 4),
'isa': (enums.HSA_AGENT_INFO_ISA, drvapi.hsa_isa_t),
}
def __init__(self, agent_id):
# This init will only happen when initializing the agents. After
# the agent initialization the instances of this class are considered
# initialized and locked, so this method will be removed.
self._id = agent_id
self._recycler = hsa._recycler
self._queues = set()
self._initialize_regions()
self._initialize_mempools()
@property
def device(self):
return _device_type_to_string(self._device)
@property
def is_component(self):
return (self.feature & enums.HSA_AGENT_FEATURE_KERNEL_DISPATCH) != 0
@property
def regions(self):
return self._regions
@property
def mempools(self):
return self._mempools
@property
def wavebits(self):
"""
log2(wavefront_size)
"""
# assume wavefront_size will always be a power of 2
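        # e.g. wavefront_size == 64 -> bin(64) == '0b1000000' -> wavebits == 6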
return bin(self.wavefront_size)[::-1].index('1')
def _initialize_regions(self):
region_ids = []
def on_region(region_id, ctxt):
region_ids.append(region_id)
return enums.HSA_STATUS_SUCCESS
callback = drvapi.HSA_AGENT_ITERATE_REGIONS_CALLBACK_FUNC(on_region)
hsa.hsa_agent_iterate_regions(self._id, callback, None)
self._regions = _RegionList([MemRegion.instance_for(self, region_id)
for region_id in region_ids])
def _initialize_mempools(self):
mempool_ids = []
def on_region(_id, ctxt=None):
mempool_ids.append(_id)
return enums.HSA_STATUS_SUCCESS
callback = drvapi.HSA_AMD_AGENT_ITERATE_MEMORY_POOLS_CALLBACK(on_region)
hsa.hsa_amd_agent_iterate_memory_pools(self._id, callback, None)
self._mempools = _RegionList([MemPool.instance_for(self, mempool_id)
for mempool_id in mempool_ids])
def _create_queue(self, size, callback=None, data=None,
private_segment_size=None, group_segment_size=None,
queue_type=None):
assert queue_type is not None
assert size <= self.queue_max_size
cb_typ = drvapi.HSA_QUEUE_CALLBACK_FUNC
cb = ctypes.cast(None, cb_typ) if callback is None else cb_typ(callback)
result = ctypes.POINTER(drvapi.hsa_queue_t)()
private_segment_size = (ctypes.c_uint32(-1)
if private_segment_size is None
else private_segment_size)
group_segment_size = (ctypes.c_uint32(-1)
if group_segment_size is None
else group_segment_size)
hsa.hsa_queue_create(self._id, size, queue_type, cb, data,
private_segment_size, group_segment_size,
ctypes.byref(result))
q = Queue(self, result)
self._queues.add(q)
return weakref.proxy(q)
def create_queue_single(self, *args, **kwargs):
kwargs['queue_type'] = enums.HSA_QUEUE_TYPE_SINGLE
return self._create_queue(*args, **kwargs)
def create_queue_multi(self, *args, **kwargs):
kwargs['queue_type'] = enums.HSA_QUEUE_TYPE_MULTI
return self._create_queue(*args, **kwargs)
def release(self):
"""
Release all resources
Called at system teardown
"""
for q in list(self._queues):
q.release()
def release_queue(self, queue):
self._queues.remove(queue)
self._recycler.free(queue)
def __repr__(self):
return "<HSA agent ({0}): {1} {2} '{3}'{4}>".format(self._id,
self.device,
self.vendor_name,
self.name,
" (component)" if self.is_component else "")
def _rank(self):
return (self.is_component, self.grid_max_size, self._device)
    def __lt__(self, other):
        if isinstance(other, Agent):
            return self._rank() < other._rank()
        else:
            return NotImplemented
    def __eq__(self, other):
        if isinstance(other, Agent):
            return self._rank() == other._rank()
        else:
            return NotImplemented
def __hash__(self):
return hash(self._rank())
def create_context(self):
return Context(self)
class _RegionList(Sequence):
__slots__ = '_all', 'globals', 'readonlys', 'privates', 'groups'
def __init__(self, lst):
self._all = tuple(lst)
self.globals = tuple(x for x in lst if x.kind == 'global')
self.readonlys = tuple(x for x in lst if x.kind == 'readonly')
self.privates = tuple(x for x in lst if x.kind == 'private')
self.groups = tuple(x for x in lst if x.kind == 'group')
def __len__(self):
return len(self._all)
def __contains__(self, item):
return item in self._all
def __reversed__(self):
return reversed(self._all)
def __getitem__(self, idx):
return self._all[idx]
class MemPool(HsaWrapper):
"""Abstracts a HSA mem pool.
This will wrap and provide an OO interface for hsa_amd_memory_pool_t
C-API elements
"""
_hsa_info_function = 'hsa_amd_memory_pool_get_info'
_hsa_properties = {
'segment': (
enums_ext.HSA_AMD_MEMORY_POOL_INFO_SEGMENT,
drvapi.hsa_amd_segment_t
),
'_flags': (
enums_ext.HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS,
ctypes.c_uint32
),
'size': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_SIZE,
ctypes.c_size_t),
'alloc_allowed': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
ctypes.c_bool),
'alloc_granule': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE,
ctypes.c_size_t),
'alloc_alignment': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALIGNMENT,
ctypes.c_size_t),
'accessible_by_all': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_ACCESSIBLE_BY_ALL,
ctypes.c_bool),
}
_segment_name_map = {
enums_ext.HSA_AMD_SEGMENT_GLOBAL: 'global',
enums_ext.HSA_AMD_SEGMENT_READONLY: 'readonly',
enums_ext.HSA_AMD_SEGMENT_PRIVATE: 'private',
enums_ext.HSA_AMD_SEGMENT_GROUP: 'group',
}
def __init__(self, agent, pool):
"""Do not instantiate MemPool objects directly, use the factory class
method 'instance_for' to ensure MemPool identity"""
self._id = pool
self._owner_agent = agent
self._as_parameter_ = self._id
@property
def kind(self):
return self._segment_name_map[self.segment]
@property
def agent(self):
return self._owner_agent
def supports(self, check_flag):
"""
Determines if a given feature is supported by this MemRegion.
        Feature flags are found in "./enums_ext.py" under:
* hsa_amd_memory_pool_global_flag_t
Params:
check_flag: Feature flag to test
"""
if self.kind == 'global':
return self._flags & check_flag
else:
return False
def allocate(self, nbytes):
assert self.alloc_allowed
assert nbytes >= 0
buff = ctypes.c_void_p()
flags = ctypes.c_uint32(0) # From API docs "Must be 0"!
hsa.hsa_amd_memory_pool_allocate(self._id, nbytes, flags, ctypes.byref(buff))
if buff.value is None:
raise HsaDriverError("Failed to allocate from {}".format(self))
return buff
_instance_dict = {}
@classmethod
def instance_for(cls, owner, _id):
try:
return cls._instance_dict[_id]
except KeyError:
new_instance = cls(owner, _id)
cls._instance_dict[_id] = new_instance
return new_instance
class MemRegion(HsaWrapper):
"""Abstracts a HSA memory region.
This will wrap and provide an OO interface for hsa_region_t C-API elements
"""
_hsa_info_function = 'hsa_region_get_info'
_hsa_properties = {
'segment': (
enums.HSA_REGION_INFO_SEGMENT,
drvapi.hsa_region_segment_t
),
'_flags': (
enums.HSA_REGION_INFO_GLOBAL_FLAGS,
drvapi.hsa_region_global_flag_t
),
'host_accessible': (enums_ext.HSA_AMD_REGION_INFO_HOST_ACCESSIBLE,
ctypes.c_bool),
'size': (enums.HSA_REGION_INFO_SIZE,
ctypes.c_size_t),
'alloc_max_size': (enums.HSA_REGION_INFO_ALLOC_MAX_SIZE,
ctypes.c_size_t),
'alloc_alignment': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_ALIGNMENT,
ctypes.c_size_t),
'alloc_granule': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_GRANULE,
ctypes.c_size_t),
'alloc_allowed': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_ALLOWED,
ctypes.c_bool),
}
_segment_name_map = {
enums.HSA_REGION_SEGMENT_GLOBAL: 'global',
enums.HSA_REGION_SEGMENT_READONLY: 'readonly',
enums.HSA_REGION_SEGMENT_PRIVATE: 'private',
enums.HSA_REGION_SEGMENT_GROUP: 'group',
}
def __init__(self, agent, region_id):
"""Do not instantiate MemRegion objects directly, use the factory class
method 'instance_for' to ensure MemRegion identity"""
self._id = region_id
self._owner_agent = agent
self._as_parameter_ = self._id
@property
def kind(self):
return self._segment_name_map[self.segment]
@property
def agent(self):
return self._owner_agent
def supports(self, check_flag):
"""
Determines if a given feature is supported by this MemRegion.
Feature flags are found in "./enums.py" under:
* hsa_region_global_flag_t
Params:
check_flag: Feature flag to test
"""
if self.kind == 'global':
return self._flags & check_flag
else:
return False
def allocate(self, nbytes):
assert self.alloc_allowed
assert nbytes <= self.alloc_max_size
assert nbytes >= 0
buff = ctypes.c_void_p()
hsa.hsa_memory_allocate(self._id, nbytes, ctypes.byref(buff))
return buff
def free(self, ptr):
hsa.hsa_memory_free(ptr)
_instance_dict = {}
@classmethod
def instance_for(cls, owner, _id):
try:
return cls._instance_dict[_id]
except KeyError:
new_instance = cls(owner, _id)
cls._instance_dict[_id] = new_instance
return new_instance
class Queue(object):
def __init__(self, agent, queue_ptr):
"""The id in a queue is a pointer to the queue object returned by hsa_queue_create.
        The Queue object has ownership of that queue object."""
self._agent = weakref.proxy(agent)
self._id = queue_ptr
self._as_parameter_ = self._id
self._finalizer = hsa.hsa_queue_destroy
def release(self):
self._agent.release_queue(self)
def __getattr__(self, fname):
return getattr(self._id.contents, fname)
@contextmanager
def _get_packet(self, packet_type):
# Write AQL packet at the calculated queue index address
queue_struct = self._id.contents
queue_mask = queue_struct.size - 1
assert (ctypes.sizeof(packet_type) ==
ctypes.sizeof(drvapi.hsa_kernel_dispatch_packet_t))
packet_array_t = (packet_type * queue_struct.size)
# Obtain the current queue write index
index = hsa.hsa_queue_add_write_index_acq_rel(self._id, 1)
while True:
read_offset = hsa.hsa_queue_load_read_index_acquire(self._id)
if read_offset <= index < read_offset + queue_struct.size:
break
queue_offset = index & queue_mask
queue = packet_array_t.from_address(queue_struct.base_address)
packet = queue[queue_offset]
# zero init
ctypes.memset(ctypes.addressof(packet), 0, ctypes.sizeof(packet_type))
yield packet
        # The write index was already advanced by hsa_queue_add_write_index_acq_rel
        # above; ring the doorbell so the packet processor sees the new packet
hsa.hsa_signal_store_release(self._id.contents.doorbell_signal, index)
def insert_barrier(self, dep_signal):
with self._get_packet(drvapi.hsa_barrier_and_packet_t) as packet:
# Populate packet
packet.dep_signal0 = dep_signal._id
header = 0
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE
header |= enums.HSA_PACKET_TYPE_BARRIER_AND << enums.HSA_PACKET_HEADER_TYPE
header |= 1 << enums.HSA_PACKET_HEADER_BARRIER
# Original example calls for an atomic store.
            # Since we are on x86, a store of an aligned 16-bit value is atomic.
# The C code is
# __atomic_store_n((uint16_t*)(&dispatch_packet->header), header, __ATOMIC_RELEASE);
packet.header = header
def dispatch(self, symbol, kernargs,
workgroup_size=None,
grid_size=None,
signal=None):
_logger.info("dispatch %s", symbol.name)
dims = len(workgroup_size)
assert dims == len(grid_size)
assert 0 < dims <= 3
assert grid_size >= workgroup_size
if workgroup_size > tuple(self._agent.workgroup_max_dim)[:dims]:
msg = "workgroupsize is too big {0} > {1}"
raise HsaDriverError(msg.format(workgroup_size,
tuple(self._agent.workgroup_max_dim)[:dims]))
s = signal if signal is not None else hsa.create_signal(1)
# Note: following vector_copy.c
with self._get_packet(drvapi.hsa_kernel_dispatch_packet_t) as packet:
# Populate packet
packet.setup |= dims << enums.HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS
packet.workgroup_size_x = workgroup_size[0]
packet.workgroup_size_y = workgroup_size[1] if dims > 1 else 1
packet.workgroup_size_z = workgroup_size[2] if dims > 2 else 1
packet.grid_size_x = grid_size[0]
packet.grid_size_y = grid_size[1] if dims > 1 else 1
packet.grid_size_z = grid_size[2] if dims > 2 else 1
packet.completion_signal = s._id
packet.kernel_object = symbol.kernel_object
packet.kernarg_address = (0 if kernargs is None
else kernargs.value)
packet.private_segment_size = symbol.private_segment_size
packet.group_segment_size = symbol.group_segment_size
header = 0
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE
header |= enums.HSA_PACKET_TYPE_KERNEL_DISPATCH << enums.HSA_PACKET_HEADER_TYPE
# Original example calls for an atomic store.
            # Since we are on x86, a store of an aligned 16-bit value is atomic.
# The C code is
# __atomic_store_n((uint16_t*)(&dispatch_packet->header), header, __ATOMIC_RELEASE);
packet.header = header
# Wait on the dispatch completion signal
# synchronous if no signal was provided
if signal is None:
_logger.info('wait for synchronous kernel to complete')
timeout = 10
if not s.wait_until_ne_one(timeout=timeout):
msg = "Kernel timed out after {timeout} second"
raise HsaKernelTimedOut(msg.format(timeout=timeout))
def __dir__(self):
return sorted(set(dir(self._id.contents) +
self.__dict__.keys()))
def owned(self):
return ManagedQueueProxy(self)
class ManagedQueueProxy(object):
def __init__(self, queue):
self._queue = weakref.ref(queue)
def __getattr__(self, item):
return getattr(self._queue(), item)
class Signal(object):
"""The id for the signal is going to be the hsa_signal_t returned by create_signal.
Lifetime of the underlying signal will be tied with this object".
Note that it is likely signals will have lifetime issues."""
def __init__(self, signal_id):
self._id = signal_id
self._as_parameter_ = self._id
weakref.finalize(self, hsa.hsa_signal_destroy, self._id)
def load_relaxed(self):
return hsa.hsa_signal_load_relaxed(self._id)
def load_acquire(self):
return hsa.hsa_signal_load_acquire(self._id)
def wait_until_ne_one(self, timeout=None):
"""
Returns a boolean to indicate whether the wait has timeout
"""
one = 1
mhz = 10 ** 6
if timeout is None:
# Infinite
expire = -1 # UINT_MAX
else:
# timeout as seconds
expire = timeout * hsa.timestamp_frequency * mhz
        # XXX: using an active wait instead of a blocked wait seems to avoid a hang in docker
hsa.hsa_signal_wait_acquire(self._id, enums.HSA_SIGNAL_CONDITION_NE,
one, expire,
enums.HSA_WAIT_STATE_ACTIVE)
return self.load_relaxed() != one
class BrigModule(object):
def __init__(self, brig_buffer):
"""
Take a byte buffer of a Brig module
"""
buf = ctypes.create_string_buffer(brig_buffer)
self._buffer = buf
self._id = ctypes.cast(ctypes.addressof(buf),
drvapi.hsa_ext_module_t)
@classmethod
def from_file(cls, file_name):
with open(file_name, 'rb') as fin:
buf = fin.read()
return BrigModule(buf)
def __len__(self):
return len(self._buffer)
def __repr__(self):
return "<BrigModule id={0} size={1}bytes>".format(hex(id(self)),
len(self))
class Program(object):
def __init__(self, model=enums.HSA_MACHINE_MODEL_LARGE,
profile=enums.HSA_PROFILE_FULL,
rounding_mode=enums.HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT,
options=None, version_major=1, version_minor=0):
self._id = drvapi.hsa_ext_program_t()
assert options is None
def check_fptr_return(hsa_status):
if hsa_status is not enums.HSA_STATUS_SUCCESS:
msg = ctypes.c_char_p()
hsa.hsa_status_string(hsa_status, ctypes.byref(msg))
_logger.info(msg.value.decode("utf-8"))
exit(-hsa_status)
support = ctypes.c_bool(0)
hsa.hsa_system_extension_supported(enums.HSA_EXTENSION_FINALIZER,
version_major,
version_minor,
ctypes.byref(support))
assert support.value, ('HSA system extension %s.%s not supported' %
(version_major, version_minor))
# struct of function pointers
self._ftabl = drvapi.hsa_ext_finalizer_1_00_pfn_t()
# populate struct
hsa.hsa_system_get_extension_table(enums.HSA_EXTENSION_FINALIZER,
version_major,
version_minor,
ctypes.byref(self._ftabl))
ret = self._ftabl.hsa_ext_program_create(model, profile,
rounding_mode, options,
ctypes.byref(self._id))
check_fptr_return(ret)
self._as_parameter_ = self._id
weakref.finalize(self, self._ftabl.hsa_ext_program_destroy,
self._id)
def add_module(self, module):
self._ftabl.hsa_ext_program_add_module(self._id, module._id)
def finalize(self, isa, callconv=0, options=None):
"""
The program object is safe to be deleted after ``finalize``.
"""
code_object = drvapi.hsa_code_object_t()
control_directives = drvapi.hsa_ext_control_directives_t()
ctypes.memset(ctypes.byref(control_directives), 0,
ctypes.sizeof(control_directives))
self._ftabl.hsa_ext_program_finalize(self._id,
isa,
callconv,
control_directives,
options,
enums.HSA_CODE_OBJECT_TYPE_PROGRAM,
ctypes.byref(code_object))
return CodeObject(code_object)
class CodeObject(object):
def __init__(self, code_object):
self._id = code_object
self._as_parameter_ = self._id
weakref.finalize(self, hsa.hsa_code_object_destroy, self._id)
class Executable(object):
def __init__(self):
ex = drvapi.hsa_executable_t()
hsa.hsa_executable_create(enums.HSA_PROFILE_FULL,
enums.HSA_EXECUTABLE_STATE_UNFROZEN,
None,
ctypes.byref(ex))
self._id = ex
self._as_parameter_ = self._id
weakref.finalize(self, hsa.hsa_executable_destroy, self._id)
def load(self, agent, code_object):
hsa.hsa_executable_load_code_object(self._id, agent._id,
code_object._id, None)
def freeze(self):
"""Freeze executable before we can query for symbol"""
hsa.hsa_executable_freeze(self._id, None)
def get_symbol(self, agent, name):
symbol = drvapi.hsa_executable_symbol_t()
hsa.hsa_executable_get_symbol(self._id, None,
ctypes.create_string_buffer(
name.encode('ascii')),
agent._id, 0,
ctypes.byref(symbol))
return Symbol(name, symbol)
class Symbol(HsaWrapper):
_hsa_info_function = 'hsa_executable_symbol_get_info'
_hsa_properties = {
'kernel_object': (
enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
ctypes.c_uint64,
),
'kernarg_segment_size': (
enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
ctypes.c_uint32,
),
'group_segment_size': (
enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
ctypes.c_uint32,
),
'private_segment_size': (
enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
ctypes.c_uint32,
),
}
def __init__(self, name, symbol_id):
self._id = symbol_id
self.name = name
class MemoryPointer(object):
__hsa_memory__ = True
def __init__(self, context, pointer, size, finalizer=None):
assert isinstance(context, Context)
self.context = context
self.device_pointer = pointer
self.size = size
self._hsa_memsize_ = size
self.finalizer = finalizer
self.is_managed = finalizer is not None
self.is_alive = True
self.refct = 0
def __del__(self):
try:
if self.is_managed and self.is_alive:
self.finalizer()
except:
traceback.print_exc()
def own(self):
return OwnedPointer(weakref.proxy(self))
def free(self):
"""
        Forces the device memory to be released.
"""
if self.is_managed:
if not self.is_alive:
raise RuntimeError("Freeing dead memory")
self.finalizer()
self.is_alive = False
def view(self):
pointer = self.device_pointer.value
view = MemoryPointer(self.context, pointer, self.size)
return OwnedPointer(weakref.proxy(self), view)
@property
def device_ctypes_pointer(self):
return self.device_pointer
def allow_access_to(self, *agents):
"""
Grant access to given *agents*.
        Upon return, only the listed agents and the owner agent have direct
access to this pointer.
"""
ct = len(agents)
if ct == 0:
return
agent_array = (ct * drvapi.hsa_agent_t)(*[a._id for a in agents])
hsa.hsa_amd_agents_allow_access(ct, agent_array, None,
self.device_pointer)
class HostMemory(mviewbuf.MemAlloc):
def __init__(self, context, owner, pointer, size):
self.context = context
self.owned = owner
self.size = size
self.host_pointer = pointer
self.handle = self.host_pointer
# For buffer interface
self._buflen_ = self.size
self._bufptr_ = self.host_pointer.value
def own(self):
return self
class OwnedPointer(object):
def __init__(self, memptr, view=None):
self._mem = memptr
self._mem.refct += 1
if view is None:
self._view = self._mem
else:
assert not view.is_managed
self._view = view
def __del__(self):
try:
self._mem.refct -= 1
assert self._mem.refct >= 0
if self._mem.refct == 0:
self._mem.free()
except ReferenceError:
pass
except:
traceback.print_exc()
def __getattr__(self, fname):
"""Proxy MemoryPointer methods
"""
return getattr(self._view, fname)
class Context(object):
"""
A context is associated with a component
"""
"""
Parameters:
agent the agent, and instance of the class Agent
"""
# a weak set of active Stream objects
_active_streams = weakref.WeakSet()
def __init__(self, agent):
self._agent = weakref.proxy(agent)
if self._agent.is_component: # only components have queues
qs = agent.queue_max_size
defq = self._agent.create_queue_multi(qs, callback=self._callback)
self._defaultqueue = defq.owned()
self.allocations = utils.UniqueDict()
# get pools
coarse_flag = enums_ext.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED
fine_flag = enums_ext.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED
alloc_mps = [mp for mp in agent.mempools.globals if mp.alloc_allowed]
self._coarsegrain_mempool = None
self._finegrain_mempool = None
for mp in alloc_mps:
if mp.supports(coarse_flag):
self._coarsegrain_mempool = mp
if mp.supports(fine_flag):
self._finegrain_mempool = mp
def _callback(self, status, queue):
drvapi._check_error(status, queue)
sys.exit(1)
@property
def unproxy(self):
# This is a trick to help handle weakproxy comparison with actual
# instance.
# See https://stackoverflow.com/a/49319989 for inspiration and the
# whole page for more general discussion.
return self
@property
def default_queue(self):
return self._defaultqueue
@property
def agent(self):
return self._agent
@property
def coarsegrain_mempool(self):
if self._coarsegrain_mempool is None:
msg = 'coarsegrain mempool is not available in {}'.format(self._agent)
raise ValueError(msg)
return self._coarsegrain_mempool
@property
def finegrain_mempool(self):
if self._finegrain_mempool is None:
msg = 'finegrain mempool is not available in {}'.format(self._agent)
raise ValueError(msg)
return self._finegrain_mempool
def memalloc(self, nbytes, memTypeFlags=None, hostAccessible=True):
"""
Allocates memory.
Parameters:
nbytes the number of bytes to allocate.
        memTypeFlags the flags the memory region must support; due to the
            inherent rawness of the underlying call, the validity of the
            flags is not checked (cf. C language).
        hostAccessible boolean as to whether the region in which the
            allocation takes place should be host accessible.
"""
hw = self._agent.device
all_reg = self._agent.regions
flag_ok_r = list() # regions which pass the memTypeFlags test
regions = list()
# don't support DSP
if hw == "GPU" or hw == "CPU":
# check user requested flags
if memTypeFlags is not None:
for r in all_reg:
count = 0
for flags in memTypeFlags:
if r.supports(flags):
count += 1
if count == len(memTypeFlags):
flag_ok_r.append(r)
else:
flag_ok_r = all_reg
# check system required flags for allocation
for r in flag_ok_r:
# check the mem region is coarse grained if dGPU present
# TODO: this probably ought to explicitly check for a dGPU.
if (hw == "GPU" and
not r.supports(enums.HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED)):
continue
# check accessibility criteria
if hostAccessible:
if r.host_accessible:
regions.append(r)
else:
if not r.host_accessible:
regions.append(r)
else:
raise RuntimeError("Unknown device type string \"%s\"" % hw)
assert len(regions) > 0, "No suitable memory regions found."
        # walk through the valid regions, trying to allocate until none are left
mem = None
for region_id in regions:
try:
mem = MemRegion.instance_for(self._agent, region_id)\
.allocate(nbytes)
except HsaApiError: # try next memory region if an allocation fails
pass
else: # allocation succeeded, stop looking for memory
break
if mem is None:
raise RuntimeError("Memory allocation failed. No agent/region \
combination could meet allocation restraints \
(hardware = %s, size = %s, flags = %s)." % \
( hw, nbytes, memTypeFlags))
fin = _make_mem_finalizer(hsa.hsa_memory_free)
ret = MemoryPointer(weakref.proxy(self), mem, nbytes,
finalizer=fin(self, mem))
if mem.value is None:
raise RuntimeError("MemoryPointer has no value")
self.allocations[mem.value] = ret
return ret.own()
def mempoolalloc(self, nbytes, allow_access_to=(), finegrain=False):
"""
Allocates memory in a memory pool.
Parameters:
*nbytes* the number of bytes to allocate.
        *allow_access_to* agents that should be granted access to the allocation.
        *finegrain* whether to allocate from the fine-grained memory pool.
"""
mempool = (self.finegrain_mempool
if finegrain
else self.coarsegrain_mempool)
buff = mempool.allocate(nbytes)
fin = _make_mem_finalizer(hsa.hsa_amd_memory_pool_free)
mp = MemoryPointer(weakref.proxy(self), buff, nbytes,
finalizer=fin(self, buff))
mp.allow_access_to(*allow_access_to)
self.allocations[buff.value] = mp
return mp.own()
def memhostalloc(self, size, finegrain, allow_access_to):
mem = self.mempoolalloc(size, allow_access_to=allow_access_to,
finegrain=finegrain)
return HostMemory(weakref.proxy(self), owner=mem,
pointer=mem.device_pointer, size=mem.size)
class Stream(object):
"""
An asynchronous stream for async API
"""
def __init__(self):
self._signals = deque()
self._callbacks = defaultdict(list)
def _add_signal(self, signal):
"""
Add a signal that corresponds to an async task.
"""
# XXX: too many pending signals seem to cause async copy to hang
if len(self._signals) > 100:
self._sync(50)
self._signals.append(signal)
def _add_callback(self, callback):
assert callable(callback)
self._callbacks[self._get_last_signal()].append(callback)
def _get_last_signal(self):
"""
Get the last signal.
"""
return self._signals[-1] if self._signals else None
def synchronize(self):
"""
Synchronize the stream.
"""
self._sync(len(self._signals))
def _sync(self, limit):
ct = 0
while self._signals:
if ct >= limit:
break
sig = self._signals.popleft()
if sig.load_relaxed() == 1:
sig.wait_until_ne_one()
for cb in self._callbacks[sig]:
cb()
del self._callbacks[sig]
ct += 1
@contextmanager
def auto_synchronize(self):
'''
A context manager that waits for all commands in this stream to execute
and commits any pending memory transfers upon exiting the context.
'''
yield self
self.synchronize()
def _make_mem_finalizer(dtor):
"""
    Builds a finalizer for device memory.
    Parameters:
    dtor a function that will delete/free the held memory from a reference
    Returns:
    the finalizing function
"""
def mem_finalize(context, handle):
allocations = context.allocations
sync = hsa.implicit_sync
def core():
_logger.info("Current allocations: %s", allocations)
if allocations:
_logger.info("Attempting delete on %s" % handle.value)
del allocations[handle.value]
sync() # implicit sync
dtor(handle)
return core
return mem_finalize
def device_pointer(obj):
"Get the device pointer as an integer"
return device_ctypes_pointer(obj).value
def device_ctypes_pointer(obj):
"Get the ctypes object for the device pointer"
if obj is None:
return c_void_p(0)
require_device_memory(obj)
return obj.device_ctypes_pointer
def is_device_memory(obj):
"""All HSA dGPU memory object is recognized as an instance with the
attribute "__hsa_memory__" defined and its value evaluated to True.
All HSA memory object should also define an attribute named
"device_pointer" which value is an int(or long) object carrying the pointer
value of the device memory address. This is not tested in this method.
"""
return getattr(obj, '__hsa_memory__', False)
def require_device_memory(obj):
"""A sentry for methods that accept HSA memory object.
"""
if not is_device_memory(obj):
raise Exception("Not a HSA memory object.")
def host_pointer(obj):
"""
    NOTE: The underlying data pointer from the host data buffer is used and
    it should not be changed until the operation, which can be asynchronous,
    completes.
"""
if isinstance(obj, int):
return obj
forcewritable = isinstance(obj, np.void)
return mviewbuf.memoryview_get_buffer(obj, forcewritable)
def host_to_dGPU(context, dst, src, size):
"""
Copy data from a host memory region to a dGPU.
Parameters:
context the dGPU context
dst a pointer to the destination location in dGPU memory
src a pointer to the source location in host memory
size the size (in bytes) of data to transfer
"""
_logger.info("CPU->dGPU")
if size < 0:
raise ValueError("Invalid size given: %s" % size)
hsa.hsa_memory_copy(device_pointer(dst), host_pointer(src), size)
def dGPU_to_host(context, dst, src, size):
"""
    Copy data from a dGPU memory region to the host.
    Parameters:
    context the dGPU context
    dst a pointer to the destination location in host memory
    src a pointer to the source location in dGPU memory
size the size (in bytes) of data to transfer
"""
_logger.info("dGPU->CPU")
if size < 0:
raise ValueError("Invalid size given: %s" % size)
hsa.hsa_memory_copy(host_pointer(dst), device_pointer(src), size)
def dGPU_to_dGPU(context, dst, src, size):
_logger.info("dGPU->dGPU")
if size < 0:
raise ValueError("Invalid size given: %s" % size)
hsa.hsa_memory_copy(device_pointer(dst), device_pointer(src), size)
def async_host_to_dGPU(dst_ctx, src_ctx, dst, src, size, stream):
_logger.info("Async CPU->dGPU")
async_copy_dgpu(dst_ctx=dst_ctx, src_ctx=src_ctx,
src=host_pointer(src), dst=device_pointer(dst),
size=size, stream=stream)
def async_dGPU_to_host(dst_ctx, src_ctx, dst, src, size, stream):
_logger.info("Async dGPU->CPU")
async_copy_dgpu(dst_ctx=dst_ctx, src_ctx=src_ctx,
dst=host_pointer(dst), src=device_pointer(src),
size=size, stream=stream)
def async_dGPU_to_dGPU(dst_ctx, src_ctx, dst, src, size, stream):
_logger.info("Async dGPU->dGPU")
async_copy_dgpu(dst_ctx=dst_ctx, src_ctx=src_ctx,
dst=device_pointer(dst), src=device_pointer(src),
size=size, stream=stream)
def async_copy_dgpu(dst_ctx, src_ctx, dst, src, size, stream):
if size < 0:
raise ValueError("Invalid size given: %s" % size)
completion_signal = hsa.create_signal(1)
dependent_signal = stream._get_last_signal()
if dependent_signal is not None:
dsignal = drvapi.hsa_signal_t(dependent_signal._id)
signals = (1, ctypes.byref(dsignal), completion_signal)
else:
signals = (0, None, completion_signal)
hsa.hsa_amd_memory_async_copy(dst, dst_ctx._agent._id,
src, src_ctx._agent._id,
size, *signals)
stream._add_signal(completion_signal)
def dgpu_count():
"""
Returns the number of discrete GPUs present on the current machine.
"""
ngpus = 0
try:
for a in hsa.agents:
if a.is_component and a.device == 'GPU':
ngpus += 1
except:
pass
return ngpus
"""
True if a dGPU is present in the current machine.
"""
dgpu_present = dgpu_count() > 0
| stonebig/numba | numba/roc/hsadrv/driver.py | Python | bsd-2-clause | 51,876 | 0.001292 |
import os
import re
import sublime
import sublime_plugin
class ExpandTabsOnLoad(sublime_plugin.EventListener):
# Run ST's 'expand_tabs' command when opening a file,
# only if there are any tab characters in the file
def on_load(self, view):
expand_tabs = view.settings().get("expand_tabs_on_load", False)
if expand_tabs and view.find("\t", 0):
view.run_command("expand_tabs", {"set_translate_tabs": True})
tab_size = view.settings().get("tab_size", 0)
message = "Converted tab characters to {0} spaces".format(tab_size)
sublime.status_message(message)
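# The behaviour is opt-in via the "expand_tabs_on_load" view setting read
# above; a hypothetical user settings snippet (file name assumed) would be:
#
#     // Preferences.sublime-settings
#     { "expand_tabs_on_load": true }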
| markashleybell/ExpandTabsOnLoad | ExpandTabsOnLoad.py | Python | mit | 630 | 0 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import hmac
from cryptography.hazmat.bindings._constant_time import lib
if hasattr(hmac, "compare_digest"):
def bytes_eq(a, b):
if not isinstance(a, bytes) or not isinstance(b, bytes):
raise TypeError("a and b must be bytes.")
return hmac.compare_digest(a, b)
else:
def bytes_eq(a, b):
if not isinstance(a, bytes) or not isinstance(b, bytes):
raise TypeError("a and b must be bytes.")
return lib.Cryptography_constant_time_bytes_eq(
a, len(a), b, len(b)
) == 1
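# Minimal usage sketch (not part of the original module): both arguments must
# be bytes, and the comparison time does not depend on where the inputs first
# differ.
#
#     from cryptography.hazmat.primitives.constant_time import bytes_eq
#     bytes_eq(b"secret", b"secret")   # True
#     bytes_eq(b"secret", b"sekret")   # False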
| ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/cryptography/hazmat/primitives/constant_time.py | Python | apache-2.0 | 798 | 0 |
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <denis-alexander.engemann@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from scipy.special import expit
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.datasets import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert density(X_) == density(X)
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis=axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def check_randomized_svd_low_rank(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0).astype(dtype, copy=False)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
U, s, Vt = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
Vt = Vt.astype(dtype, copy=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert Ua.shape == (n_samples, k)
assert sa.shape == (k,)
assert Va.shape == (k, n_features)
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], Vt[:k, :]), np.dot(Ua, Va),
decimal=decimal)
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == 'f'
assert sa.dtype.kind == 'f'
assert Va.dtype.kind == 'f'
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize('dtype',
(np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
check_randomized_svd_low_rank(dtype)
@pytest.mark.parametrize('dtype',
(np.float32, np.float64))
def test_row_norms(dtype):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype, copy=False)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = sparse.csr_matrix(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype, copy=False)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype, copy=False)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with an approximate effective rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.01
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.1
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert X.shape == (n_samples, n_features)
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, Vt = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(Vt))
error_2 = linalg.norm(A, ord='fro')
U, s, Vt = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(Vt))
error_20 = linalg.norm(A, ord='fro')
assert np.abs(error_2 - error_20) > 100
for normalizer in ['LU', 'QR', 'auto']:
U, s, Vt = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(Vt))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, Vt = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(Vt))
error = linalg.norm(A, ord='fro')
assert 15 > np.abs(error_2 - error)
def test_randomized_svd_sparse_warnings():
# randomized_svd throws a warning for lil and dok matrix
rng = np.random.RandomState(42)
X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
n_components = 5
for cls in (sparse.lil_matrix, sparse.dok_matrix):
X = cls(X)
assert_warns_message(
sparse.SparseEfficiencyWarning,
"Calculating SVD of a {} is expensive. "
"csr_matrix is more efficient.".format(cls.__name__),
randomized_svd, X, n_components, n_iter=1,
power_iteration_normalizer='none')
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, Vt = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, Vt, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, Vt = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, Vt, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, Vt, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, Vt, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert u_based
assert not v_based
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert u_based
assert not v_based
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(expit(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from https://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = np.full(X1.shape[1], X1.shape[0], dtype=np.int32)
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
old_means = np.array([535., 535., 535., 535.])
old_variances = np.array([4225., 4225., 4225., 4225.])
old_sample_count = np.array([2, 2, 2, 2], dtype=np.int32)
X = np.array([[170, 170, 170, 170],
[430, 430, 430, 430],
[300, 300, 300, 300]])
X_nan = np.array([[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan]])
X_means, X_variances, X_count = _incremental_mean_and_var(
X, old_means, old_variances, old_sample_count)
X_nan_means, X_nan_variances, X_nan_count = _incremental_mean_and_var(
X_nan, old_means, old_variances, old_sample_count)
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_variances, X_variances)
assert_allclose(X_nan_count, X_count)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
# Naive one pass var: >tol (=1063)
assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert n == A.shape[0]
# the mean is also slightly unstable
assert np.abs(A.mean(axis=0) - mean).max() > 1e-6
assert np.abs(np_var(A) - var).max() > tol
# Robust implementation: <tol (177)
mean, var = A0[0, :], np.zeros(n_features)
n = np.full(n_features, n_samples // 2, dtype=np.int32)
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_array_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert tol > np.abs(np_var(A) - var).max()
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = np.full(batch.shape[1], batch.shape[0],
dtype=np.int32)
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_array_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
@pytest.mark.parametrize("A_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("B_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
def test_safe_sparse_dot_2d(A_array_constr, B_array_constr):
rng = np.random.RandomState(0)
A = rng.random_sample((30, 10))
B = rng.random_sample((10, 20))
expected = np.dot(A, B)
A = A_array_constr(A)
B = B_array_constr(B)
actual = safe_sparse_dot(A, B, dense_output=True)
assert_allclose(actual, expected)
def test_safe_sparse_dot_nd():
rng = np.random.RandomState(0)
# dense ND / sparse
A = rng.random_sample((2, 3, 4, 5, 6))
B = rng.random_sample((6, 7))
expected = np.dot(A, B)
B = sparse.csr_matrix(B)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
# sparse / dense ND
A = rng.random_sample((2, 3))
B = rng.random_sample((4, 5, 3, 6))
expected = np.dot(A, B)
A = sparse.csr_matrix(A)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
@pytest.mark.parametrize("A_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
def test_safe_sparse_dot_2d_1d(A_array_constr):
rng = np.random.RandomState(0)
B = rng.random_sample((10))
# 2D @ 1D
A = rng.random_sample((30, 10))
expected = np.dot(A, B)
A = A_array_constr(A)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
# 1D @ 2D
A = rng.random_sample((10, 30))
expected = np.dot(B, A)
A = A_array_constr(A)
actual = safe_sparse_dot(B, A)
assert_allclose(actual, expected)
@pytest.mark.parametrize("dense_output", [True, False])
def test_safe_sparse_dot_dense_output(dense_output):
rng = np.random.RandomState(0)
A = sparse.random(30, 10, density=0.1, random_state=rng)
B = sparse.random(10, 20, density=0.1, random_state=rng)
expected = A.dot(B)
actual = safe_sparse_dot(A, B, dense_output=dense_output)
assert sparse.issparse(actual) == (not dense_output)
if dense_output:
expected = expected.toarray()
assert_allclose_dense_sparse(actual, expected)
| bnaul/scikit-learn | sklearn/utils/tests/test_extmath.py | Python | bsd-3-clause | 26,768 | 0 |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
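# CPU miner for the bitcoind "getwork" RPC: each getwork() call returns a
# hex-encoded block header template ('data') and a 256-bit target.  work()
# pre-hashes the 76 header bytes that never change, then sweeps a 32-bit
# nonce through the last 4 bytes, double-SHA256 hashing each candidate and
# accepting it when the resulting value is below the target; submit_work()
# splices the winning nonce back into the original hex data and posts it
# upstream via getwork([data]).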
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
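# The single command-line argument names a plain-text config file of
# key=value lines ('#' lines are ignored).  Recognized keys and defaults,
# as parsed below:
#
#   host      = 127.0.0.1
#   port      = 7046
#   threads   = 1
#   hashmeter = 0
#   scantime  = 30
#   rpcuser   = <required>
#   rpcpass   = <required>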
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7046
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| cmtcoin/wallet | contrib/pyminer/pyminer.py | Python | mit | 6,434 | 0.034815 |
#!/usr/bin/env python
import sys
import os
import re
from lib2to3.pgen2.driver import Driver
from lib2to3 import pygram, pytree
from lib2to3.pytree import Node, Leaf, type_repr
from lib2to3.pygram import python_symbols
def sym_type(name):
return getattr(python_symbols, name)
def new_node(name):
return Node(sym_type(name), [])
import __Pyjamas__
from __Future__ import __Future__
# This is taken from the django project.
# Escape every ASCII character with a value less than 32.
JS_ESCAPES = (
('\\', r'\x5C'),
('\'', r'\x27'),
('"', r'\x22'),
('>', r'\x3E'),
('<', r'\x3C'),
('&', r'\x26'),
(';', r'\x3B')
) + tuple([('%c' % z, '\\x%02X' % z) for z in range(32)])
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in JS_ESCAPES:
value = value.replace(bad, good)
return value
re_js_string_escape = ''.join([chr(i) for i in range(32)])
re_js_string_escape += '''\\\\"'<>&;'''
re_js_string_escape = re.compile("[%s]" % re_js_string_escape)
re_int = re.compile('^[-+]?[0-9]+$')
re_long = re.compile('^[-+]?[0-9]+[lL]$')
re_hex_int = re.compile('^[-+]?0x[0-9a-fA-F]+$')
re_hex_long = re.compile('^[-+]?0x[0-9a-fA-F]+[lL]$')
re_oct_int = re.compile('^[-+]?0[0-8]+$')
re_oct_long = re.compile('^[-+]?0[0-8]+[lL]$')
builtin_names = [
'ArithmeticError',
'AssertionError',
'AttributeError',
'BaseException',
'BufferError',
'BytesWarning',
'DeprecationWarning',
'EOFError',
'Ellipsis',
'EnvironmentError',
'Exception',
'False',
'FloatingPointError',
'FutureWarning',
'GeneratorExit',
'IOError',
'ImportError',
'ImportWarning',
'IndentationError',
'IndexError',
'KeyError',
'KeyboardInterrupt',
'LookupError',
'MemoryError',
'NameError',
'None',
'NotImplemented',
'NotImplementedError',
'OSError',
'OverflowError',
'PendingDeprecationWarning',
'ReferenceError',
'RuntimeError',
'RuntimeWarning',
'StandardError',
'StopIteration',
'SyntaxError',
'SyntaxWarning',
'SystemError',
'SystemExit',
'TabError',
'True',
'TypeError',
'UnboundLocalError',
'UnicodeDecodeError',
'UnicodeEncodeError',
'UnicodeError',
'UnicodeTranslateError',
'UnicodeWarning',
'UserWarning',
'ValueError',
'Warning',
'ZeroDivisionError',
'_',
'__debug__',
'__doc__',
'__import__',
'__name__',
'__package__',
'abs',
'all',
'any',
'apply',
'basestring',
'bin',
'bool',
'buffer',
'bytearray',
'bytes',
'callable',
'chr',
'classmethod',
'cmp',
'coerce',
'compile',
'complex',
'copyright',
'credits',
'delattr',
'dict',
'dir',
'divmod',
'enumerate',
'eval',
'execfile',
'exit',
'file',
'filter',
'float',
'format',
'frozenset',
'getattr',
'globals',
'hasattr',
'hash',
'help',
'hex',
'id',
'input',
'int',
'intern',
'isinstance',
'issubclass',
'iter',
'len',
'license',
'list',
'locals',
'long',
'map',
'max',
'min',
'next',
'object',
'oct',
'open',
'ord',
'pow',
'print',
'property',
'quit',
'range',
'raw_input',
'reduce',
'reload',
'repr',
'reversed',
'round',
'set',
'setattr',
'slice',
'sorted',
'staticmethod',
'str',
'sum',
'super',
'tuple',
'type',
'unichr',
'unicode',
'vars',
'xrange',
'zip',
]
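# Names of the Python builtins.  add_name() checks references against this
# list when flagging a Name as 'builtin', and node_funcdef() skips
# builtin-flagged names when it builds the locals-capture table for a
# generated function.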
class TranslateOptions(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except:
return None
class Name(object):
def __init__(self, name, reflineno, glob=False, to_js=None):
self.name = name
self.reflineno = reflineno
self.glob = glob
self.to_js = to_js
self.depth = None
self.builtin = False
def __str__(self):
return "<Name %s %s %s>" % (self.name, self.reflineno, self.glob)
def __repr__(self):
return "<Name %s %s %s>" % (self.name, self.reflineno, self.glob)
class Names(dict):
pass
class ClassNames(Names):
pass
class AstNode(object):
pass
class Argument(AstNode):
def __init__(self, name, value=None):
self.name = name
self.value = value
class Attribute(AstNode):
def __init__(self, name):
self.name = name
class Code(AstNode):
def __new__(cls, code, lineno):
if code is None:
return None
return object.__new__(cls)
def __init__(self, code, lineno):
self.code = code
self.lineno = lineno
def __str__(self):
if self.code is None:
return None
return str(self.code)
def __repr__(self):
if self.code is None:
return None
return repr(self.code)
class Decorator(AstNode):
def __init__(self, name, lineno):
self.name = name
self.lineno = lineno
class Import(AstNode):
def __init__(self, modname, assname=None, fromlist=None):
self.modname = '.'.join(modname)
if assname is None:
self.assname = modname[0]
else:
self.assname = assname
self.fromlist = fromlist
class Parameters(AstNode):
def __init__(self, args, star_args, dstar_args, defaults):
assert isinstance(args, list)
self.args = []
self.named_args = {}
for arg in args:
if not isinstance(arg, Argument):
self.args.append(arg)
else:
if arg.name == '*':
assert star_args is None
star_args = arg.value
continue
if arg.name == '**':
assert dstar_args is None
dstar_args = arg.value
continue
self.named_args[arg.name] = arg.value
if not self.named_args:
self.named_args = None
self.star_args = star_args
self.dstar_args = dstar_args
self.all_args = args[:]
if star_args is not None:
self.all_args.append(star_args)
if dstar_args is not None:
self.all_args.append(dstar_args)
self.defaults = defaults
class Slice(AstNode):
def __init__(self, items):
assert isinstance(items, tuple)
self.items = items
def __str__(self):
return 'Slice%s' % (self.items,)
def __repr__(self):
return 'Slice%s' % (self.items,)
leaf_type = {
1: 'name',
2: 'number',
3: 'str',
}
# TODO: import this from mkbuiltin.py
func_type = {
'function': 1,
'staticmethod': 2,
'classmethod': 3,
}
# TODO: import this from mkbuiltin.py
short_names = {
'module': 'm$',
'globals': 'g$',
'locals': 'l$',
'namestack': 'n$',
'funcbase': 'f$',
'builtin': 'B$',
'constants': 'C$',
'None': 'N$',
'True': 'T$',
'False': 'F$',
}
op_names1 = {
'inv': 'op_inv',
'neg': 'op_neg',
'not': 'op_not',
}
op_names2 = {
'+': 'op_add',
'-': 'op_sub',
'*': 'op_mul',
'/': 'op_div', # set to op_truediv with 'from __future__ import division'
'//': 'op_floordiv',
'%': 'op_mod',
'**': 'op_pow',
'&': 'op_bitand',
'|': 'op_bitor',
'^': 'op_bitxor',
'<<': 'op_bitlshift',
'>>': 'op_bitrshift',
'+=': 'op_add',
'-=': 'op_sub',
'*=': 'op_mul',
'/=': 'op_div',
'//=': 'op_floordiv',
'%=': 'op_mod',
'**=': 'op_pow',
'&=': 'op_bitand',
'|=': 'op_bitor',
'^=': 'op_bitxor',
'<<=': 'op_bitlshift',
'>>=': 'op_bitrshift',
}
op_compare = {
'is': 'op_is',
'is not': 'op_is_not',
'==': 'op_eq',
'!=': 'op_ne',
'<': 'op_lt',
'<=': 'op_le',
'>': 'op_gt',
'>=': 'op_ge',
'in': 'op_in',
'not in': 'op_not_in',
}
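# Core of the translator: walks the lib2to3 parse tree of a Python module
# (one dispatch method per grammar node/leaf type, e.g. node_funcdef,
# node_classdef, leaftype_str) and emits JavaScript source that targets the
# pyjs/Pyjamas builtin runtime referenced through the jsvars short names.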
class Translator(object):
jsvars = {
'module_store': '$pyjs.loaded_modules',
# Internals
'catch': 'e$',
'catchclass': 'e$cls',
'module': 'm$',
'globals': 'g$',
'locals': 'l$',
'funcbase': 'f$',
'builtin': 'B$',
'constants': 'C$',
#'__builtin__': '__builtin__',
'__builtin__': '__builtin__',
'track': '$pyjs.track',
# Short names
'fcall': '_f',
'fcallext': '_fe',
'mcall': '_m',
'mcallext': '_me',
'getattr': '_ga',
'setattr': '_sa',
'getitem': '_i',
'booljs': '_b',
'str': 's$',
'int': 'i$',
'bool': 'b$',
'None': 'N$',
'True': 'T$',
'False': 'F$',
# References to builtins
'try_else': "B$['TryElse']",
'dict': "B$['dict']",
'list': "B$['list']",
'tuple': "B$['tuple']",
}
jsvars.update(short_names)
indent_str = '\t'
__future__ = __Future__()
class TranslationError(RuntimeError):
filename = None
def __init__(self, msg, lineno=None, filename=None):
self.msg = msg
self.lineno = lineno
if filename is not None:
self.filename = filename
def __str__(self):
return "TranslationError in %s at line %s: %s" % (
self.filename, self.lineno, self.msg,
)
def __repr__(self):
return "<TranslationError %s,%s: %s" % (
self.filename, self.lineno, self.msg,
)
def __init__(self, srcfile, module_name, options):
#sys.stderr.write('module_name: %s\n' % module_name)
self.op_names2 = op_names2.copy()
self.lines = []
self.imported_modules = {}
self.imported_js = {}
self.indent_level = 0
self.depth = 0
self.names = [Names()]
self.const_int = {}
self.const_long = {}
self.const_float = {}
self.const_str = {}
self.tmp_jsname = {}
self.assign_state = False
self.inloop = 0
self.next_func_type = [func_type['function']]
self.last_lineno = 0
self.jsvars = self.jsvars.copy()
self.srcfile = srcfile
self.tree = None
self.driver = None
self.TranslationError.filename = srcfile
if not module_name:
module_name, extension = os.path.splitext(os.path.basename(srcfile))
self.jsvars['module_name'] = module_name
self.options = TranslateOptions(**options)
def ast_tree_creator(self, srcfile=None):
if srcfile is None:
srcfile = self.srcfile
if self.driver is None:
self.driver = Driver(pygram.python_grammar, pytree.convert)
return self.driver.parse_file(srcfile)
def tree_merge(self, dst, src, flags=None):
if flags and 'FULL_OVERRIDE' in flags:
return src
for child in src.children:
if isinstance(child, Node):
if type_repr(child.type) == 'funcdef':
self.tree_replace_function(dst, child)
elif type_repr(child.type) == 'classdef':
self.tree_merge_class(dst, child)
return dst
def tree_merge_class(self, dst, src):
if isinstance(src.children[0], Leaf) and \
src.children[0].value == 'class' and \
isinstance(src.children[1], Leaf) and \
isinstance(src.children[-1], Node):
class_name = src.children[1].value
if type_repr(src.children[-1].type) == 'suite':
src_children = src.children[-1].children
else:
src_children = [src.children[-1]]
else:
raise self.TranslationError(
"Cannot merge class %r" % src
)
for dst_child in dst.children:
if type_repr(dst_child.type) == 'classdef' and \
dst_child.children[0].value == 'class' and \
isinstance(dst_child.children[1], Leaf) and \
dst_child.children[1].value == class_name and \
isinstance(dst_child.children[-1], Node) and \
type_repr(dst_child.children[-1].type) == 'suite':
dst_node = dst_child.children[-1]
for src_child in src_children:
if type_repr(src_child.type) == 'funcdef':
self.tree_replace_function(dst_node, src_child)
elif type_repr(src_child.type) == 'simple_stmt':
self.tree_replace_stmt(dst_node, src_child)
return
raise self.TranslationError(
"Cannot find class %r for merge" % class_name
)
def tree_replace_function(self, dst, func_node):
if isinstance(func_node.children[0], Leaf) and \
func_node.children[0].value == 'def' and \
isinstance(func_node.children[1], Leaf):
func_name = func_node.children[1].value
else:
raise self.TranslationError(
"Cannot replace function %r" % func_node
)
for child in dst.children:
if isinstance(child, Node) and \
type_repr(child.type) == 'funcdef':
if isinstance(child.children[0], Leaf) and \
child.children[0].value == 'def' and \
isinstance(func_node.children[1], Leaf) and \
child.children[1].value == func_name:
child.children = func_node.children
child.changed()
return
# Next two lines will append a function if it's not found,
# but that's different behavior than in the other translator
#dst.append_child(func_node)
#return
raise self.TranslationError(
"Cannot find function %r for replace" % func_name
)
def tree_replace_stmt(self, dst, stmt_node):
if isinstance(stmt_node.children[0], Leaf):
if stmt_node.children[0].value == 'pass':
return
else:
node = stmt_node.children[0]
if type_repr(node.type) == 'expr_stmt':
if isinstance(node.children[0], Leaf) and \
isinstance(node.children[1], Leaf) and \
node.children[1].value == '=':
for child in dst.children:
if isinstance(child, Node) and \
type_repr(child.type) == 'simple_stmt' and \
isinstance(child.children[0], Node) and \
type_repr(child.children[0].type) == 'expr_stmt':
dst_node = child.children[0]
if isinstance(dst_node.children[0], Leaf) and \
isinstance(dst_node.children[1], Leaf) and \
dst_node.children[1].value == '=' and \
dst_node.children[0].value == node.children[0].value:
dst_node.children = node.children
dst_node.changed()
return
dst.append_child(stmt_node)
return
raise self.TranslationError(
"Cannot replace or merge statement %r" % stmt_node
)
def dispatch_file(self, tree):
if isinstance(tree, Leaf):
assert tree.value == '', self.TranslationError(repr(tree.value), self.get_lineno(tree))
tree = new_node('file_input')
return self.dispatch(tree)
def get_javascript(self):
code = []
for indent_level, line in self.lines:
code.append(
'%s%s' % (
self.indent_str * indent_level,
line,
)
)
return '\n'.join(code)
def add_import(self, modname, fromlist=None):
# Refuse to report pyjslib as imported module
if modname == 'pyjslib':
return
if fromlist is None:
fromlist = [None]
fl = self.imported_modules.get(modname, None)
if fl is None:
self.imported_modules[modname] = fromlist
for f in fromlist:
if f is not None:
self.add_import("%s.%s" % (modname, f))
else:
for f in fromlist:
if not f in fl:
fl.append(f)
if f is not None:
self.add_import("%s.%s" % (modname, f))
def indent(self, n=1):
self.indent_level += n
return self.indent_level
def dedent(self, n=1):
indent_level = self.indent_level
self.indent_level -= n
assert self.indent_level >= 0, self.TranslationError("indent_level: %d" % self.indent_level, self.get_lineno(self.last_lineno))
return indent_level
def get_lineno(self, node):
if getattr(node, 'lineno', None) is not None:
return node.lineno
#if isinstance(node, Leaf) or isinstance(node, Code):
# return node.lineno
for child in node.children:
if getattr(child, 'lineno', None) is not None:
return child.lineno
#if isinstance(child, Leaf) or isinstance(child, Code):
# return child.lineno
lineno = self.get_lineno(child)
if lineno is not None:
return lineno
def track_lineno(self, node):
return self.get_lineno(node)
def add_lines(self, lines, lineno=None, split=True):
if lineno != None:
track = "%(track)s['lineno'] = " % self.jsvars
while len(self.lines) > 0 and \
self.lines[-1][1].strip().startswith(track):
self.lines.pop()
line = lines[0]
level = self.indent_level
if line and line[0] == '+':
while line[0] == '+':
level += 1
line = line[1:]
self.lines.append([level, '%s%s;' % (track, lineno)])
if split:
lines = lines.split("\n")
for line in lines:
level = self.indent_level
if line and line[0] == '+':
while line[0] == '+':
level += 1
line = line[1:]
line = line.lstrip()
self.lines.append([level, line])
def add_name(self, name, reflineno=None, glob=False, to_js=None, force=False):
#if not force and self.assign_state is False:
# return
if not force and \
(self.assign_state is False and \
isinstance(self.names[-1], ClassNames)):
return
depth = 0
for names in self.names:
if not isinstance(names, ClassNames):
depth += 1
if name in self.names[-1]:
_name = self.names[-1][name]
if reflineno is None and len(self.names) > 1 and \
not _name.glob and _name.reflineno is not None and \
_name.depth == depth and force is False:
print _name.name, _name.reflineno, _name.glob, _name.depth, len(self.names) - 1
reflineno = _name.reflineno
_name.reflineno = None
raise self.TranslationError(
"Local variable '%s' referenced before assignment at line %s" % (name, reflineno)
)
else:
done = []
if len(self.names) == 1:
_name = Name(name, reflineno, glob, to_js)
_name.depth = 1
elif reflineno is not None:
_name = None
names = self.names[:-1]
while len(names):
if name in names[-1]:
_name = names[-1][name]
break
done.append(names.pop())
if _name is None:
_name = Name(name, reflineno, to_js=to_js)
_name.depth = 1
else:
_name = Name(name, reflineno, glob, to_js)
_name.depth = depth
#_name.depth = 0
#for names in self.names:
# if not isinstance(names, ClassNames):
# _name.depth += 1
if reflineno is not None and _name.depth == 0:
if not _name.builtin and name in builtin_names:
_name.builtin = True
if not force and self.assign_state is False and \
_name.depth == 1 and \
len(self.names) > 1:
return
while len(done):
done[-1][name] = _name
done.pop()
self.names[-1][name] = _name
def get_names_depth(self, skip_class_names=True):
depth = 0
for names in self.names:
if skip_class_names and isinstance(names, ClassNames):
continue
depth += 1
return depth
def get_jsname(self, name):
if isinstance(name, Code):
return str(name)
jsvars = self.jsvars.copy()
jsvars.update(name=name)
jsname = None
if name in self.names[-1]:
if self.names[-1][name].to_js is not None:
return self.names[-1][name].to_js
if not self.names[-1][name].glob:
if isinstance(self.names[-1], ClassNames):
jsname = """\
%(funcbase)s['$dict'][%(name)r]""" % jsvars
else:
jsname = "%(locals)s[%(name)r]" % jsvars
jsvars.update(jsname=jsname)
else:
for names in self.names[1:-1]:
if isinstance(names, ClassNames):
continue
if name in names:
if isinstance(self.names[-1], ClassNames):
cn = self.names.pop()
else:
cn = None
self.add_name(name)
jsname = self.get_jsname(name)
if cn is not None:
self.names.append(cn)
return jsname
if jsname is None:
if name in ['globals', 'locals']:
src = "%%(%s)s" % name
return ("%(builtin)s['%(name)s'](" + src + ")") % jsvars
if name in self.names[0] and self.names[0][name].to_js is not None:
return self.names[0][name].to_js
jsname = '%(globals)s[%(name)r]' % (jsvars)
if self.options.check_defined:
return "(typeof %(jsname)s != 'undefined' ? %(jsname)s : %(builtin)s['UnboundLocalError'](%(name)r)" % jsvars
return jsname
def get_tmp_jsname(self, prefix):
if not prefix in self.tmp_jsname:
self.tmp_jsname[prefix] = 1
else:
self.tmp_jsname[prefix] += 1
return '%s%d' % (prefix, self.tmp_jsname[prefix])
def add_const_int(self, value):
value = str(value)
if not value in self.const_int:
name = '%s.i%d' % (
self.jsvars['constants'],
len(self.const_int),
)
self.const_int[value] = name
return self.const_int[value]
def add_const_long(self, value):
value = str(long(value))
if not value in self.const_long:
name = '%s.l%d' % (
self.jsvars['constants'],
len(self.const_long),
)
self.const_long[value] = name
return self.const_long[value]
def add_const_float(self, value):
if not value in self.const_float:
name = '%s.f%d' % (
self.jsvars['constants'],
len(self.const_float),
)
self.const_float[value] = name
return self.const_float[value]
def add_const_str(self, value):
if not value in self.const_str:
name = '%s.s%d' % (
self.jsvars['constants'],
len(self.const_str),
)
self.const_str[value] = name
return self.const_str[value]
def assert_value(self, node, value, expected_value):
if isinstance(expected_value, list) or \
isinstance(expected_value, tuple):
assert value in expected_value, self.TranslationError(
"one of %r expected, got '%s'" % (expected_value, value),
self.get_lineno(node),
)
else:
assert value == expected_value, self.TranslationError(
"%r expected, got '%s'" % (expected_value, value),
self.get_lineno(node),
)
def assert_type(self, node, type_, expected_type):
type_ = type_repr(type_)
assert type_ == expected_type, self.TranslationError(
"'%s' expected, got '%s'" % (expected_type, type_),
self.get_lineno(node),
)
def assert_instance(self, node, inst, expected_class):
assert isinstance(inst, expected_class), self.TranslationError(
"instance of '%s' expected, got '%r'" % (expected_class.__name__, inst),
self.get_lineno(node),
)
def assert_dedent(self, level, expected_level):
assert level == expected_level, self.TranslationError(
"expected dedent %s, got %s" % (expected_level, level),
self.last_lineno,
)
def dispatch(self, x, assign=False):
if isinstance(x, Code):
return x
assign_state = self.assign_state
if assign:
self.assign_state = assign
lineno = self.get_lineno(x)
if lineno is not None:
self.last_lineno = lineno
else:
lineno = self.last_lineno
try:
if isinstance(x, Node):
visit = getattr(self, 'node_%s' % type_repr(x.type), None)
if visit is None:
raise self.TranslationError("No name for node type %s at %r" % (x.type, x))
else:
visit = getattr(self, 'leaf_%s' % type_repr(x.value), None)
if visit is None:
if x.type in leaf_type:
visit = getattr(self, 'leaftype_%s' % leaf_type.get(x.type), None)
if visit is None:
if x.value in ['', '\n', '\r\n', ';']:
self.assign_state = assign_state
return
raise self.TranslationError("No name for leaf type %s at %r" % (x.type, x))
code = visit(x)
self.assign_state = assign_state
return code
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
if isinstance(exc_value, self.TranslationError):
if exc_value.lineno is None:
exc_value.lineno = lineno
raise
else:
print "Error in %s at %s:" % (self.srcfile, lineno)
raise
def not_implemented(self, node):
print repr(node)
print dir(node)
raise NotImplementedError(repr(node))
def get_test(self, node, name=None):
#print 'get_test:', repr(node)
if isinstance(node, Node):
t = type_repr(node.type)
if t in ['and_test', 'or_test', 'not_test']:
if name is None:
return getattr(self, 'node_%s' % t)(node, True)
return getattr(self, 'node_%s' % t)(node, name=name)
elif t in ['comparison']:
test = self.dispatch(node)
if name is None:
return "%s.valueOf()" % test
else:
return "(%s=%s).valueOf()" % (name, test)
test = self.dispatch(node)
if name is None:
return "%s(%s)" % (self.jsvars['booljs'], test)
return "%s(%s=%s)" % (self.jsvars['booljs'], name, test)
def get_bit_expr(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
args = [self.dispatch(childs.next())]
op = child = childs.next()
while child is not None:
op = child
op_name = self.op_names2.get(op.value, None)
if op is None:
self.not_implemented(node)
try:
while True:
args.append(self.dispatch(childs.next()))
child = childs.next()
if child.value != op.value:
break
except StopIteration:
child = None
if len(args) == 2:
left, right = args
jsvars.update(locals())
code = "%(builtin)s['%(op_name)s2'](%(left)s, %(right)s)" % jsvars
else:
args = ', '.join(str(i) for i in args)
jsvars.update(locals())
code = "%(builtin)s['%(op_name)s']([%(args)s])" % jsvars
args = [code]
return code
def get_op_item(self, node, op_item, op_slice):
op = op_item
if isinstance(node, Leaf) and \
node.value == ':':
what = Slice((
self.add_const_int(0),
self.add_const_int(2147483647),
))
else:
what = self.dispatch(node)
if isinstance(what, Slice):
slice = []
for i in what.items:
if i is None:
slice.append(self.jsvars['None'])
else:
slice.append(i)
what = "[%s]" % ', '.join(slice)
op = op_slice
return op, what
def collect_locals(self, node):
for child in node.children:
if isinstance(child, Node):
if type_repr(child.type) == 'funcdef':
self.add_name(child.children[1].value, force=True)
elif type_repr(child.type) == 'classdef':
self.add_name(child.children[1].value, force=True)
elif type_repr(child.type) == 'suite':
self.collect_locals(child)
elif type_repr(child.type) == 'simple_stmt':
child0 = child.children[0]
if isinstance(child0, Node):
children = child0.children
if type_repr(child0.type) == 'expr_stmt':
if isinstance(children[1], Leaf) and \
children[1].value == '=':
if isinstance(children[0], Leaf):
self.add_name(children[0].value, force=True)
elif type_repr(child0.type) == 'global_stmt':
for c in children:
if c.value == ',':
continue
self.add_name(c.value, glob=True, force=True)
return
def create_assign_stmt(self, lhs='lhs', rhs='rhs', subst_lhs=None, subst_rhs=None):
stmt = self.driver.parse_string(
"%(lhs)s = %(rhs)s\n" % locals(),
True,
).children[0]
# Node(simple_stmt, [Node(expr_stmt, [Leaf(1, 'lhs'), Leaf(22, '='), Leaf(1, 'rhs')]), Leaf(4, '\n')])
if subst_lhs is not None:
children = stmt.children[0].children
if isinstance(subst_lhs, basestring):
children[0].value = subst_lhs
else:
children[0] = subst_lhs
if subst_rhs is not None:
children = stmt.children[0].children
if isinstance(subst_rhs, basestring):
children[2].value = subst_rhs
else:
children[2] = subst_rhs
return stmt
def create_call_stmt(self, fname='fn', args=None, subst_base=None, subst_args=None):
if args is None or len(args) == 0:
a = ''
else:
a = []
for arg in args:
if isinstance(arg, Node) or isinstance(arg, Leaf):
a.append('1')
else:
a.append(arg)
a = ', '.join(a)
stmt = self.driver.parse_string(
"%(fname)s(%(a)s)\n" % locals(),
True,
).children[0]
# driver.parse_string("fn()\n", True).children[0]
# Node(simple_stmt, [Node(power, [Leaf(1, 'fn'), Node(trailer, [Leaf(7, '('), Leaf(8, ')')])]), Leaf(4, '\n')]), Leaf(0, '')
# driver.parse_string("fn(1)\n", True).children[0]
# Node(simple_stmt, [Node(power, [Leaf(1, 'fn'), Node(trailer, [Leaf(7, '('), Leaf(2, '1'), Leaf(8, ')')])]), Leaf(4, '\n')]), Leaf(0, '')
# driver.parse_string("fn(1,2)\n", True).children[0]
# Node(simple_stmt, [Node(power, [Leaf(1, 'fn'), Node(trailer, [Leaf(7, '('), Node(arglist, [Leaf(2, '1'), Leaf(12, ','), Leaf(2, '2')]), Leaf(8, ')')])]), Leaf(4, '\n')]), Leaf(0, '')
trailer_childs = stmt.children[0].children[-1].children
if args is None or len(args) == 0:
pass
elif len(args) == 1:
arg = args[0]
if isinstance(arg, Node) or isinstance(arg, Leaf):
trailer_childs[1] = arg
else:
self.assert_type(stmt, trailer_childs[1].type, 'arglist')
arglist_childs = trailer_childs[1].children
for idx, arg in enumerate(args):
idx *= 2
if isinstance(arg, Node) or isinstance(arg, Leaf):
arglist_childs[idx] = arg
if subst_base is not None:
if isinstance(subst_base, basestring):
stmt.children[0].children[0].value = subst_base
else:
stmt.children[0].children[0] = subst_base
if subst_args is not None:
self.assert_type(stmt, trailer_childs[1].type, 'arglist')
arglist_childs = trailer_childs[1].children
if len(args) == 1:
arg = args[0]
if arg is None:
pass
elif isinstance(arg, basestring):
trailer_childs[1].value = arg
else:
trailer_childs[1] = arg
elif len(args) > 1:
for idx, arg in enumerate(subst_args):
idx *= 2
if arg is None:
pass
elif isinstance(arg, basestring):
arglist_childs[idx].value = arg
else:
arglist_childs[idx] = arg
return stmt
def create_getindex_stmt(self, base='base', idx='idx', subst_base=None, subst_idx=None):
stmt = self.driver.parse_string(
"%(base)s[%(idx)s]\n" % locals(),
True,
).children[0]
# driver.parse_string("base[idx]\n", True).children[0]
# Node(simple_stmt, [Node(power, [Leaf(1, 'base'), Node(trailer, [Leaf(9, '['), Leaf(1, 'idx'), Leaf(10, ']')])]), Leaf(4, '\n')]), Leaf(0, '')
if subst_base is not None:
children = stmt.children[0].children
if isinstance(subst_base, basestring):
children[0].value = subst_base
else:
children[0] = subst_base
if subst_idx is not None:
children = stmt.children[0].children[1].children
if isinstance(subst_idx, basestring):
children[1].value = subst_idx
else:
children[1] = subst_idx
return stmt
def node_and_expr(self, node):
return self.get_bit_expr(node)
def node_and_test(self, node, jsbool=False, name=None):
jsvars = self.jsvars.copy()
args = []
childs = node.children.__iter__()
if jsbool:
name = None
elif name is None:
name = self.get_tmp_jsname('and$')
self.add_lines("var %s" % name)
args.append(self.get_test(childs.next(), name))
try:
while True:
self.assert_value(node, childs.next().value, 'and')
args.append(self.get_test(childs.next(), name))
except StopIteration:
pass
test = ' && '.join(args)
if jsbool:
return test
return '(%s ? %s : %s)' % (test, name, name)
def node_arglist(self, node):
childs = node.children.__iter__()
arglist = []
try:
while True:
child = childs.next()
if isinstance(child, Leaf):
if child.value == ',':
continue
if child.value in ['*', '**']:
value = self.dispatch(childs.next())
arglist.append(Argument(child.value, value))
continue
arglist.append(self.dispatch(child))
else:
arglist.append(self.dispatch(child))
except StopIteration:
pass
return arglist
def node_argument(self, node):
if isinstance(node.children[1], Leaf):
if node.children[1].value == '=':
if len(node.children) == 3:
name = node.children[0].value
value = self.dispatch(node.children[2])
return Argument(name, value)
else:
if type_repr(node.children[1].type) == 'comp_for':
return self.node_listmaker(node)
self.not_implemented(node)
def node_arith_expr(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
left = self.dispatch(childs.next())
try:
while True:
op = childs.next()
op = self.op_names2.get(op.value, None)
if op is None:
self.not_implemented(node)
right = self.dispatch(childs.next())
jsvars.update(locals())
left = "%(builtin)s['%(op)s'](%(left)s, %(right)s)" % jsvars
except StopIteration:
pass
return left
def node_assert_stmt(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, 'assert')
test = self.dispatch(childs.next())
arg = ''
if len(node.children) > 2:
child = childs.next()
self.assert_value(node, child.value, ',')
arg = ', %s' % self.dispatch(childs.next())
jsvars.update(locals())
# TODO: return %(builtin)s['raise'](%(builtin)s['AssertionError']%(arg)s, %(None)s)
return """if (!%(booljs)s(%(test)s)) {
+ return %(builtin)s['raise'](%(module)s['$new'](%(builtin)s['AssertionError']%(arg)s));
}""" % jsvars
def node_atom(self, node):
jsvars = self.jsvars.copy()
items = []
cls = None
if node.children[0].value == '(':
cls = jsvars['tuple']
if len(node.children) == 3:
items = self.dispatch(node.children[1])
if not isinstance(items, list):
return items
elif len(node.children) != 2:
self.not_implemented(node)
elif node.children[0].value == '[':
cls = jsvars['list']
if len(node.children) == 3:
items = self.dispatch(node.children[1])
if not isinstance(items, list):
if isinstance(node.children[1], Leaf):
items = [items]
elif type_repr(node.children[1].type) == 'listmaker':
pass
else:
items = [items]
elif len(node.children) != 2:
self.not_implemented(node)
elif node.children[0].value == '{':
cls = jsvars['dict']
if len(node.children) == 3:
items = self.dispatch(node.children[1])
if not isinstance(items, dict):
self.not_implemented(node)
elif len(node.children) != 2:
self.not_implemented(node)
if items:
items = ["[%s, %s]" % (k, v) for k, v in items.iteritems()]
else:
items = []
elif node.children[0].value == '`':
assert len(node.children) == 3
what = self.dispatch(node.children[1])
jsvars.update(locals())
return "%(builtin)s['repr'](%(what)s)" % jsvars
elif leaf_type.get(node.children[0].type) == 'str':
s = ''
for child in node.children:
assert leaf_type.get(child.type) == 'str'
s += child.value
return self.add_const_str(eval(s))
else:
self.not_implemented(node)
if isinstance(items, list):
items = '[%s]' % ', '.join([str(i) for i in items])
jsvars.update(locals())
return "%(module)s['$new'](%(cls)s, %(items)s)" % jsvars
def node_augassign(self, node):
self.not_implemented(node)
def node_break_stmt(self, node):
return 'break'
def node_classdef(self, node):
jsvars = self.jsvars.copy()
#print node.depth()
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, 'class')
name = childs.next().value
self.add_name(name)
tok = childs.next()
if tok.value == ':':
bases = self.get_jsname('object')
else:
self.assert_value(node, tok.value, '(')
bases = childs.next()
if isinstance(bases, Leaf):
if bases.value == ')':
bases = None
else:
bases = [self.get_jsname(bases.value)]
else:
bases = self.dispatch(bases)
if bases is None:
bases = self.get_jsname('object')
elif isinstance(bases, list):
bases = ', '.join([str(i) for i in bases])
self.assert_value(node, childs.next().value, ')')
else:
self.assert_value(node, childs.next().value, ')')
self.assert_value(node, childs.next().value, ':')
lineno = self.track_lineno(node)
jsvars.update(locals())
if isinstance(self.names[-1], ClassNames):
namespace = "%(funcbase)s['$dict']" % jsvars
else:
namespace = "%(locals)s" % jsvars
jsvars.update(namespace=namespace)
self.add_lines("""\
%(namespace)s[%(name)r] = %(builtin)s['B$type'](%(module)s, %(name)r, [%(bases)s], {});
(function(%(funcbase)s){
+ //var %(locals)s = %(funcbase)s['$dict'];""" % jsvars)
names = ClassNames()
self.names.append(names)
indent_level = self.indent()
self.next_func_type.append(func_type['function'])
try:
while True:
child = childs.next()
self.dispatch(child)
except StopIteration:
pass
self.next_func_type.pop()
self.assert_dedent(self.dedent(), indent_level)
assert names is self.names.pop(), self.TranslationError("names pop error", self.get_lineno(node))
if '__slots__' in names:
self.add_lines("""\
+ %(funcbase)s['__slots__'] = %(module)s['$new'](%(tuple)s, %(locals)s['__slots__']).__array;\
""" % jsvars)
self.add_lines("})(%(namespace)s[%(name)r]);" % jsvars)
return name
def node_comp_for(self, node):
assert False, "Shouldn't get here..."
def node_comp_if(self, node):
assert False, "Shouldn't get here..."
def node_comp_iter(self, node):
self.not_implemented(node)
def node_comp_op(self, node):
if node.children[0].value == 'is':
if node.children[1].value == 'not':
return op_compare['is not']
elif node.children[0].value == 'not':
if node.children[1].value == 'in':
return op_compare['not in']
self.not_implemented(node)
def node_comparison(self, node):
jsvars = self.jsvars.copy()
left = op = right = None
childs = node.children.__iter__()
first_left = left = self.dispatch(childs.next())
prev_right = None
cmp_expr = []
tmp = None
try:
while True:
op_node = childs.next()
right = self.dispatch(childs.next())
if isinstance(op_node, Leaf):
op = op_compare[op_node.value]
elif type_repr(op_node.type) == 'comp_op':
op = self.dispatch(op_node)
jsvars.update(locals())
if prev_right is None:
cmp_expr.append("""\
%(builtin)s['%(op)s'](%(left)s, %(right)s)""" % jsvars)
else:
if tmp is None:
tmp = self.get_tmp_jsname('comp$')
jsvars['tmp'] = tmp
self.add_lines("var %s;" % tmp)
cmp_expr = ["""\
%(builtin)s['%(op)s'](%(first_left)s, %(tmp)s=%(prev_right)s)""" % jsvars]
cmp_expr.append("""\
%(builtin)s['%(op)s'](%(tmp)s, %(tmp)s=%(right)s)""" % jsvars)
left = right
prev_right = right
except StopIteration:
pass
if cmp_expr:
if len(cmp_expr) == 1:
return cmp_expr[0]
s = ' && '.join([
"(%s).valueOf()" % i
for i in cmp_expr
])
jsvars.update(locals())
return "(%(s)s ? %(True)s : %(False)s)" % jsvars
self.not_implemented(node)
def node_compound_stmt(self, node):
self.not_implemented(node)
def node_continue_stmt(self, node):
self.not_implemented(node)
def node_decorated(self, node):
self.assert_instance(node, node.children[0], Node)
self.assert_instance(node, node.children[1], Node)
assert len(node.children) == 2
decorators = self.dispatch(node.children[0])
func = node.children[1]
if not isinstance(decorators, list):
decorators = [decorators]
lineno = self.track_lineno(decorators[0])
next_func_type = None
if isinstance(self.names[-1], ClassNames):
next_func_type = func_type['function']
if decorators[-1].name == 'staticmethod':
next_func_type = func_type['staticmethod']
elif decorators[-1].name == 'classmethod':
next_func_type = func_type['classmethod']
self.next_func_type.append(next_func_type)
self.assert_value(func, func.children[0].value, 'def')
name = func.children[1].value
self.add_name(name, self.track_lineno(func), force=True)
src = self.get_jsname(name)
dst = "%s = " % src
jsvars = self.jsvars.copy()
for decorator in decorators:
if decorator is decorators[-1] and \
next_func_type is not None and \
decorator.name in ['staticmethod', 'classmethod']:
pass
else:
deco = decorator.name.split('.')
if len(deco) == 1:
deco = self.get_jsname(decorator.name)
else:
dst = ''
deco = "%s(%s, ['%s'])" %(
jsvars['getattr'],
self.get_jsname(deco[0]),
"', '".join(deco[1:]),
)
jsvars.update(locals())
self.add_lines("""\
%(src)s = %(fcall)s(%(module)s, %(lineno)s, %(deco)s, null,""" % jsvars)
dst = ''
self.indent()
name = self.dispatch(func)
for decorator in decorators:
if decorator is decorators[-1] and \
next_func_type is not None and \
decorator.name in ['staticmethod', 'classmethod']:
pass
else:
self.dedent()
# remove trailing ';' from last line
assert self.lines[-1][1][-1] == ';'
self.lines[-1][1] = self.lines[-1][1][:-1]
self.add_lines(");")
if next_func_type is not None:
self.next_func_type.pop()
return
def node_decorator(self, node):
self.assert_value(node, node.children[0].value, '@')
self.assert_value(node, node.children[-1].value, ['\n', '\r\n'])
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, '@')
child = childs.next()
if isinstance(child, Leaf):
name = child.value
else:
name = self.dispatch(child)
# TODO / FIXME : handle x.setter etc
name = '.'.join(name)
self.assert_value(node, childs.next().value, ['\n', '\r\n'])
return Decorator(name, node.children[0].lineno)
def node_decorators(self, node):
decorators = []
for child in node.children:
decorators.append(self.dispatch(child))
return decorators
def node_del_stmt(self, node):
# del a
# del a.b
# del a.b[1]
# del a, b
# del a[:]
# a = [0,1,2,3,4,5,6,7,8,9] ; del a[1:8:2]
# Node(del_stmt, [Leaf(1, 'del'), Leaf(1, 'a')])
# Node(del_stmt, [Leaf(1, 'del'), Node(power, [Leaf(1, 'a'), Node(trailer, [Leaf(23, '.'), Leaf(1, 'b')])])])
# Node(del_stmt, [Leaf(1, 'del'), Node(power, [Leaf(1, 'a'), Node(trailer, [Leaf(23, '.'), Leaf(1, 'b')]), Node(trailer, [Leaf(9, '['), Leaf(2, '1'), Leaf(10, ']')])])])
jsvars = self.jsvars.copy()
lineno = self.track_lineno(node)
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, 'del')
child = childs.next()
if isinstance(child, Node) and \
type_repr(child.type) == 'exprlist':
childs = child.children.__iter__()
child = childs.next()
try:
while True:
if isinstance(child, Leaf):
name = self.get_jsname(child.value)
self.add_lines("delete %s;" % name, lineno)
elif type_repr(child.type) == 'power':
jsvars = self.jsvars.copy()
tail = child.children.pop()
if len(child.children) > 1:
base = self.dispatch(child)
else:
base = self.dispatch(child.children[0])
if type_repr(tail.type) == 'trailer':
what = tail.children[1]
if tail.children[0].value == '.':
# delattr
op = 'delattr'
if isinstance(what, Leaf):
what = repr(what.value)
else:
what = self.dispatch(what)
elif tail.children[0].value == '[':
op, what = self.get_op_item(
what, 'delitem', 'delslice',
)
else:
self.not_implemented(node)
jsvars.update(locals())
self.add_lines("""\
%(builtin)s['%(op)s'](%(base)s, %(what)s)\
""" % jsvars)
return
else:
self.not_implemented(node)
self.assert_value(node, childs.next().value, ',')
child = childs.next()
except StopIteration:
pass
def node_dictsetmaker(self, node):
values = {}
childs = node.children.__iter__()
try:
while True:
k = self.dispatch(childs.next())
self.assert_value(node, childs.next().value, ':')
v = self.dispatch(childs.next())
values[k] = v
self.assert_value(node, childs.next().value, ',')
except StopIteration:
pass
return values
def node_dotted_as_name(self, node):
assert len(node.children) == 3
if isinstance(node.children[0], Leaf):
mod = [node.children[0].value]
else:
mod = self.dispatch(node.children[0])
name = node.children[2].value
return Import(mod, name)
def node_dotted_as_names(self, node):
imports = []
childs = node.children.__iter__()
try:
while True:
child = childs.next()
if isinstance(child, Leaf):
if child.value == ',':
continue
imports.append(Import([child.value]))
else:
imp = self.dispatch(child)
if isinstance(imp, list):
imp = Import(imp)
else:
imp = Import([imp])
imports.append(imp)
except StopIteration:
pass
return imports
def node_dotted_name(self, node):
names = []
for child in node.children:
if child.value == '.':
continue
names.append(child.value)
return names
def node_encoding_decl(self, node):
self.not_implemented(node)
def node_eval_input(self, node):
self.not_implemented(node)
def node_except_clause(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, 'except')
classes = None
varname = None
try:
child = childs.next()
if isinstance(child, Node) and type_repr(child.type) == 'atom':
self.assert_value(node, child.children[0].value, '(')
self.assert_value(node, child.children[-1].value, ')')
classes = self.dispatch(child.children[1])
else:
classes = self.dispatch(child)
self.assert_value(node, childs.next().value, ',')
varname = self.dispatch(childs.next(), assign=True)
except StopIteration:
pass
if classes is not None:
if not isinstance(classes, list):
classes = [classes]
tests = []
return classes, varname
return None, None
def node_exec_stmt(self, node):
self.not_implemented(node)
def node_expr(self, node):
if len(node.children) > 2:
if isinstance(node.children[1], Leaf):
if node.children[1].value == '|':
return self.get_bit_expr(node)
self.not_implemented(node)
def _assign(self, node, left, rest):
jsvars = self.jsvars.copy()
lineno = self.track_lineno(node)
if len(rest) == 1:
right = self.dispatch(rest[0])
else:
self.assert_value(node, rest[1].value, '=')
right = self._assign(node, rest[0], rest[2:])
if not isinstance(left, Node):
if isinstance(self.names[-1], ClassNames):
self.dispatch(left, assign=right)
left = left.value
jsvars.update(locals())
return "%(setattr)s(%(funcbase)s, ['%(left)s'], %(right)s)" % jsvars
left = self.dispatch(left, assign=right)
elif type_repr(left.type) in ['testlist_star_expr', 'atom', 'exprlist']:
# Assignment to tuple or list (aka tuple assignment)
# children for:
# a, b = foo
# [Node(testlist_star_expr, [Leaf(1, 'a'), Leaf(12, ','), Leaf(1, 'b')]), Leaf(22, '='), Leaf(1, 'foo')]
# (a, b) = foo
# [Node(atom, [Leaf(7, '('), Node(testlist_gexp, [Leaf(1, 'a'), Leaf(12, ','), Leaf(1, 'b')]), Leaf(8, ')')]), Leaf(22, '='), Leaf(1, 'foo')]
# for k, v in foo:
# pass
# [Node(exprlist, [Leaf(1, 'k'), Leaf(12, ','), Leaf(1, 'v')]), Leaf(22, '='), Leaf(1, 'for$1')]
head_lines = []
lines = []
def tuple_assign(left, right):
if type_repr(left.type) == 'atom':
children = left.children[1:-1]
if isinstance(children[0], Node) and \
type_repr(children[0].type) in ['testlist_gexp', 'listmaker']:
children = children[0].children[:]
else:
children = left.children[:]
n = len(children) / 2 + 1
rhs = self.get_tmp_jsname('tplass$')
jsvars.update(locals())
self.add_lines("var %(rhs)s;" % jsvars)
head_lines.append(
"%(rhs)s = %(builtin)s['tplass'](%(right)s, %(n)s);" % jsvars
)
idx = -1
for child in children:
if isinstance(child, Leaf) and child.value == ',':
continue
idx += 1
right = self.create_getindex_stmt(
idx=1,
subst_base=rhs,
subst_idx=str(idx),
).children[0]
if isinstance(child, Node) and type_repr(child.type) == 'atom':
self.assert_value(node, child.children[0].value, ['(', '['])
right = self.dispatch(right)
tuple_assign(child, right)
else:
self.dispatch(child, assign=right)
n = self.create_assign_stmt(
subst_lhs=child,
subst_rhs=right,
).children[0]
line = self.node_expr_stmt(n)
if line[-1] != ';':
line += ';'
lines.append(line)
tuple_assign(left, right)
lines = head_lines + lines
return '\n'.join(lines)
elif type_repr(left.type) == 'power':
tail = left.children.pop()
if len(left.children) > 1:
base = self.dispatch(left)
else:
base = self.dispatch(left.children[0])
if isinstance(base, Code):
base = str(base)
self.assert_instance(node, base, basestring)
args = []
if type_repr(tail.type) == 'trailer':
what = tail.children[1]
if tail.children[0].value == '.':
# setattr
op = 'setattr'
if isinstance(what, Leaf):
what = repr(what.value)
else:
what = self.dispatch(what)
what = "[%s]" % what
elif tail.children[0].value == '[':
op, what = self.get_op_item(
what, 'setitem', 'setslice',
)
else:
self.not_implemented(node)
jsvars.update(locals())
return """\
%(builtin)s['%(op)s'](%(base)s, %(what)s, %(right)s)\
""" % jsvars
else:
self.not_implemented(node)
else:
left = self.dispatch(left)
return '%s = %s' % (left, right)
def node_expr_stmt(self, node):
jsvars = self.jsvars.copy()
lineno = self.track_lineno(node)
if node.children[1].value == '=':
return self._assign(node, node.children[0], node.children[2:])
elif node.children[1].value[-1] == '=' and \
node.children[1].value in self.op_names2:
jsvars = self.jsvars.copy()
op_node = node.children[1]
op = self.op_names2.get(op_node.value, None)
if op is None:
self.not_implemented(node)
right = self.dispatch(node.children[2])
left = node.children[0]
get_left = self.dispatch(left)
jsvars.update(locals())
# lhs = op_X(lhs, rhs)
# Node(simple_stmt, [Node(expr_stmt, [Leaf(1, 'b'), Leaf(22, '='), Leaf(2, '1')]), Leaf(4, '\n')])
if op_node.value[:-1] in ['&', '|', '^', '>>', '<<']:
stmt = "%(builtin)s['%(op)s2'](%(get_left)s, %(right)s)" % jsvars
else:
stmt = "%(builtin)s['%(op)s'](%(get_left)s, %(right)s, true)" % jsvars
if isinstance(left, Leaf):
set_left = left.value
else:
set_left = self.dispatch(left, stmt)
return set_left
stmt = self.create_assign_stmt(
subst_lhs=set_left,
subst_rhs=Code(stmt, lineno),
).children[0]
stmt = self.dispatch(stmt)
return stmt
self.not_implemented(node)
def node_exprlist(self, node):
self.not_implemented(node)
def node_factor(self, node):
if len(node.children) == 2:
if node.children[0].value == '-':
right = node.children[1]
if isinstance(right, Leaf) and \
leaf_type.get(right.type) == 'number':
v = right.value
right.value = '-' + right.value
r = self.dispatch(right)
right.value = v
return r
value = self.dispatch(right)
return "%s['%s'](%s)" % (
self.jsvars['builtin'], op_names1['neg'], value,
)
if node.children[0].value == '~':
right = node.children[1]
value = self.dispatch(right)
return "%s['%s'](%s)" % (
self.jsvars['builtin'], op_names1['inv'], value,
)
self.not_implemented(node)
def node_file_input(self, node):
jsvars = self.jsvars.copy()
# Initialize module creation
self.add_lines("""\
/* <start module: %(module_name)s */ (function(){
var %(module)s = %(module_store)s[%(module_name)r] = {
+ '$inst': true,
+ '$dict': {
++ '__name__': null
+ },
+ 'toString': function() { return "<module " + this['$dict']['__name__'] + ">";},
+ '$module_init': function($name$) {
++ if (%(globals)s['__name__'] !== null) {
+++ return %(module)s;
++ }
++ var $name = $name$;
++ if ($name === null || typeof $name == 'undefined') $name = %(module_name)r;
++ %(builtin)s = %(module_store)s[%(__builtin__)r]['$dict'];
++ %(namestack)s = [%(builtin)s['_builtin_object_'], %(globals)s];""" % jsvars)
if jsvars['module_name'] != jsvars['__builtin__']:
self.add_lines("""\
++ %(module)s['__class__'] = %(builtin)s['module'];
++ %(module)s['$new'] = %(builtin)s['$new'];
++ $name = %(module)s['$new'](%(builtin)s['str'], $name);
++ %(globals)s['__doc__'] = %(None)s = %(builtin)s['None'];
++ %(globals)s['__name__'] = $name;""" % jsvars)
if os.path.basename(self.srcfile) == '__init__.py':
self.add_lines("""\
++ %(globals)s['__package__'] = $name;""" % jsvars)
jsvars.update(locals())
if len(node.children) > 0:
# Initialize the short names and the constants etc.
#
self.add_lines("""\
++ %(locals)s = %(globals)s,
++ %(funcbase)s = %(globals)s,
++ %(globals)s['__builtins__'] = %(module_store)s[%(__builtin__)r]
++ %(track)s['module'] = $name;""" % jsvars)
if jsvars['module_name'] != jsvars['__builtin__']:
# Add builtins to the module object _and_ the globals. On
# deletion of a global, the builtin (if any) has to be
# copied back from module to global.
# TODO: restrict the number of 'copied' builtins
self.add_lines("""\
++ init_short_names$();
++ init_constants$();
++ for (var name in %(namestack)s[0]) {
+++ %(globals)s[name] = %(module_store)s[%(module_name)r][name] = %(namestack)s[0][name];
++ }
++ %(globals)s['__name__'] = %(fcall)s(%(module)s, null, %(builtin)s['str'], null, $name);\
""" % jsvars)
self.indent(2)
# Now the module content
for child in node.children:
self.dispatch(child)
# Close the module creation
self.dedent(2)
self.assert_dedent(self.indent_level, 0)
self.add_lines("""\
++ return %(module)s;
+ }
};
var %(globals)s = %(module_store)s[%(module_name)r]['$dict'];""" % jsvars)
if len(node.children) > 0:
# Declare short names for builtins
self.add_lines("""\
var %(namestack)s, %(locals)s, %(funcbase)s, %(builtin)s, %(fcall)s, %(fcallext)s, %(mcall)s, %(mcallext)s, %(booljs)s,
%(getattr)s, %(setattr)s, %(getitem)s, %(None)s, %(int)s, %(bool)s, %(True)s, %(False)s, %(str)s,
%(constants)s = {};
function init_short_names$(silent) {
+ var builtin = %(builtin)s;
+ try {
++ %(fcall)s = builtin['fcall'];
++ %(fcallext)s = builtin['fcallext'];
++ %(mcall)s = builtin['mcall'];
++ %(mcallext)s = builtin['mcallext'];
++ %(booljs)s = builtin['B$booljs'];
++ %(getattr)s = builtin['getattr'];
++ %(setattr)s = builtin['setattr'];
++ %(getitem)s = 'getitem';
++ %(None)s = builtin['None'];
++ %(int)s = builtin['int'];
++ %(bool)s = builtin['B$bool'];
++ %(True)s = builtin['True'];
++ %(False)s = builtin['False'];
++ %(str)s = builtin['B$str'];
+ } catch (e) {
++ if (silent !== true) {
+++ throw e;
++ }
+ }
};
function init_constants$(silent) {
+ var builtin = %(builtin)s;
+ try {
""" % jsvars)
# Add constants: int
for name, value in sorted([(v, k) for k,v in self.const_int.iteritems()]):
if name is not None:
v = abs(int(value))
if v > (1 << 30):
value = "%s(%r)" % (jsvars['str'], value)
jsvars['value'] = value
jsvars['name'] = name
self.add_lines("""\
++%(name)s = %(fcall)s(%(module)s, null, builtin['int'], null, %(value)s);\
""" % jsvars)
# Add constants: long
for name, value in sorted([(v, k) for k,v in self.const_long.iteritems()]):
if name is not None:
v = abs(int(value))
if v > (1 << 30):
value = "%s(%r)" % (jsvars['str'], value)
jsvars['value'] = value
jsvars['name'] = name
self.add_lines("""\
++%(name)s = %(fcall)s(%(module)s, null, builtin['long'], null, %(value)s);\
""" % jsvars)
# Add constants: float
for name, value in sorted([(v, k) for k,v in self.const_float.iteritems()]):
if name is not None:
jsvars['value'] = value
jsvars['name'] = name
self.add_lines("""\
++%(name)s = %(fcall)s(%(module)s, null, builtin['float'], null, %(value)s);\
""" % jsvars)
# Add constants: str
for name, value in sorted([(v,k) for k,v in self.const_str.iteritems()]):
value = "'%s'" % re_js_string_escape.sub(self.substitute_js_chars, value)
if isinstance(value, unicode):
value = value.encode('ascii', 'xmlcharrefreplace')
if name is not None:
jsvars['value'] = value
jsvars['name'] = name
self.add_lines("""\
++%(name)s = %(str)s(%(value)s);\
""" % jsvars)
self.add_lines("""\
+ } catch (e) {
++ if (silent !== true) {
+++ throw e;
++ }
+ }
};""" % jsvars)
self.add_lines("""\
})();
/* end module: %(module_name)s */
""" % jsvars)
pyjs_deps = self.imported_modules.keys()
if pyjs_deps:
pyjs_deps.sort()
jsvars.update(pyjs_deps=pyjs_deps)
self.add_lines("""\
/*
PYJS_DEPS: %(pyjs_deps)r
*/
""" % jsvars)
pyjs_js = self.imported_js.keys()
if pyjs_js:
pyjs_js.sort()
jsvars.update(pyjs_js=pyjs_js)
self.add_lines("""\
/*
PYJS_JS: %(pyjs_js)r
*/
""" % jsvars)
def node_flow_stmt(self, node):
self.not_implemented(node)
def node_for_stmt(self, node):
jsvars = self.jsvars.copy()
lineno = self.track_lineno(node)
inloop = self.inloop
self.inloop += 1
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, 'for')
assign = childs.next()
self.assert_value(node, childs.next().value, 'in')
iterable = self.dispatch(childs.next())
if isinstance(iterable, list):
iterable = '[%s]' % ', '.join([str(i) for i in iterable])
self.assert_value(node, childs.next().value, ':')
if True:# len(node.children) > 6 and node.children[6].value == 'else':
floop = self.get_tmp_jsname('for$')
floopdecl = 'var %s = [];\n' % floop
floopass = '\n++%s = true;' % floop
floopass = ''
else:
floop = None
floopdecl = floopass = ''
iter = self.get_tmp_jsname('iter$')
loop = self.get_tmp_jsname('for$')
jsvars.update(locals())
# There's a special 'next$' method that doesn't throw
# StopIteration, but returns void instead.
self.add_lines("""\
var %(iter)s = %(builtin)s['iter'](%(iterable)s, %(None)s);
%(floopdecl)svar %(loop)s;
try {
+ for (;;) {
++ %(loop)s = %(floop)s;
++ %(loop)s = %(mcall)s(%(module)s, %(lineno)s, %(iter)s, 'next');\
%(floopass)s""" % jsvars, lineno)
indent_level = self.indent(2)
n = self.create_assign_stmt(
subst_lhs=assign,
subst_rhs=loop,
).children[0]
line = self.node_expr_stmt(n)
if line.rstrip()[-1] != ';':
line += ';'
self.add_lines(line)
n = len(self.lines)
self.dispatch(childs.next())
self.assert_dedent(self.dedent(2), indent_level)
self.add_lines("""\
+ }
} catch (%(catch)s) {
+ if (%(loop)s !== %(floop)s || %(catch)s['__class__'] !== %(builtin)s['StopIteration']) throw %(catch)s;\
""" % jsvars)
if len(node.children) > 6 and node.children[6].value == 'else':
if len(self.lines) > n:
self.add_lines("""\
}
if (%(loop)s === %(floop)s) {""" % jsvars)
else:
self.add_lines("""\
}
if (true) {""" % jsvars)
indent_level = self.indent(1)
self.assert_value(node, childs.next().value, 'else')
self.assert_value(node, childs.next().value, ':')
self.dispatch(childs.next())
self.assert_dedent(self.dedent(1), indent_level)
self.add_lines("""\
}""" % jsvars)
self.inloop -= 1
def node_funcdef(self, node, is_lambda=False):
jsvars = self.jsvars.copy()
#print 'node.depth():', node.depth()
depth = self.get_names_depth()
childs = node.children.__iter__()
if is_lambda:
self.assert_value(node, childs.next().value, 'lambda')
name = "<lambda>"
assign_name = self.get_tmp_jsname('lamb$')
assign = "var %s = " % assign_name
if isinstance(node.children[1], Leaf) and \
node.children[1].value == ':':
params = Parameters([], None, None, None)
else:
child = childs.next()
if isinstance(child, Leaf):
params = Parameters([child.value], None, None, None)
#args, star_args, dstar_args, defaults
else:
params = self.dispatch(child)
else:
self.assert_value(node, childs.next().value, 'def')
name = childs.next().value
self.add_name(name, force=True)
assign = ''
params = self.dispatch(childs.next())
self.assert_instance(node, params, Parameters)
if params.star_args is None:
star_args = 'null'
else:
star_args = repr(params.star_args)
if params.dstar_args is None:
dstar_args = 'null'
else:
dstar_args = repr(params.dstar_args)
if params.defaults is None:
defaults = 'null'
else:
defaults = ', '.join([str(self.dispatch(i)) for i in params.defaults])
defaults = '[%s]' % defaults
tfpdef = {}
jsargs = []
locals_init = []
for arg in params.all_args:
if isinstance(arg, tuple):
tfpdef_name = self.get_tmp_jsname('tfpdef$')
jsargs.append(tfpdef_name)
def flatten_arg(names):
args = []
for name in names:
if isinstance(name, tuple):
name = flatten_arg(name)
else:
self.add_name(name)
args.append(name)
args = ', '.join(args)
return "(%s)" % args
tfpdef[tfpdef_name] = flatten_arg(arg)
else:
jsargs.append('$%s' % arg)
if is_lambda:
locals_init.append(
"%s['%s'] = $%s;" % (
jsvars['locals'],
arg,
arg,
)
)
else:
locals_init.append("'%s': $%s" % (arg, arg))
args = params.args
if not jsargs:
jsargs = ''
else:
jsargs = ', '.join(jsargs)
self.assert_value(node, childs.next().value, ':')
type = self.next_func_type[-1]
lineno = self.track_lineno(node)
jsvars.update(locals())
self.add_lines("""\
%(assign)s(function(%(funcbase)s, $%(locals)s){
+ return %(builtin)s['func'](%(module)s, %(lineno)s, %(funcbase)s, '%(name)s', %(type)s, %(args)r, %(star_args)s, %(dstar_args)s, %(defaults)s, function(%(jsargs)s) {\
""" % jsvars)
if not is_lambda:
self.add_lines("""\
++ var %(namestack)s = $%(locals)s.slice(0);
++ var %(locals)s = {""" % jsvars)
else:
for line in locals_init:
self.add_lines("++%s" % line)
lines = self.lines
self.lines = func_lines = []
names = Names()
self.names.append(names)
for a in params.all_args:
self.add_name(a)
if not is_lambda:
self.collect_locals(node)
indent_level = self.indent(2)
try:
lambda_code = None
while True:
child = childs.next()
code = self.dispatch(child)
if is_lambda:
if lambda_code is None:
self.add_lines(
"return %s" % code, self.track_lineno(child),
)
lambda_code = code
else:
self.TranslationError(
"Multiple lines found in lambda definition",
self.get_lineno(node)
)
except StopIteration:
pass
self.assert_dedent(self.dedent(2), indent_level)
self.lines = lines
assert names is self.names.pop()
for n in names.itervalues():
if n.name not in params.all_args and \
n.builtin is False and \
n.to_js is None and \
n.depth <= depth and \
n.glob is False:
scope = 'locals'
src = self.jsvars['funcbase']
src = '$%(locals)s' % self.jsvars
src = '%s[%s]' % (self.jsvars['namestack'], n.depth)
locals_init.append(
"'%s': %s['%s']" % (
n.name, src, n.name,
),
)
locals_init = ',\n+++'.join(locals_init)
locals_init = locals_init.strip()
jsvars.update(locals())
if not is_lambda:
self.add_lines("""\
+++ %(locals_init)s
++ };
++ %(namestack)s[%(depth)s + 1] = %(locals)s;""" % jsvars)
if tfpdef:
self.indent(2)
for k, v in tfpdef.iteritems():
n = self.create_assign_stmt(
lhs=str(v),
subst_rhs=Code(k, self.track_lineno(node)),
).children[0]
line = self.node_expr_stmt(n)
self.add_lines(line)
self.dedent(2)
self.lines += func_lines
if is_lambda:
self.add_lines("""\
+ });
})({}, %(namestack)s);""" % jsvars)
return assign_name
if isinstance(self.names[-1], ClassNames):
self.add_lines("""\
++ return %(None)s;
+ });
})(%(funcbase)s, %(namestack)s);""" % jsvars)
else:
self.add_lines("""\
++ return %(None)s;
+ });
})(%(locals)s, %(namestack)s);""" % jsvars)
return name
def node_global_stmt(self, node):
for child in node.children[1:]:
if child.value == ',':
continue
self.add_name(child.value, glob=True)
def node_if_stmt(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
try:
while True:
stmt = childs.next()
if stmt.value == 'if':
test = self.get_test(childs.next())
test = "if (%s) {" % test
self.add_lines(test, self.track_lineno(stmt))
elif stmt.value == 'elif':
test = self.get_test(childs.next())
test = "} else if (%s) {" % test
self.add_lines(test)
elif stmt.value == 'else':
self.add_lines('} else {')
self.assert_value(node, childs.next().value, ':')
self.indent()
self.dispatch(childs.next())
self.dedent()
except StopIteration:
pass
self.add_lines("}")
def node_import_as_name(self, node):
assert len(node.children) == 3
return node.children[0].value, node.children[2].value
def node_import_as_names(self, node):
names = []
for child in node.children:
if isinstance(child, Leaf):
if child.value == ',':
continue
names.append([child.value, None])
else:
names.append(self.dispatch(child))
return names
def node_import_from(self, node):
jsvars = self.jsvars.copy()
imports = []
if isinstance(node.children[1], Leaf):
modname = node.children[1].value
else:
modname = '.'.join(self.dispatch(node.children[1]))
for child in node.children[3:]:
if isinstance(child, Leaf):
if child.value in ['(', ')']:
continue
names = [[child.value, None]]
else:
names = self.dispatch(child)
if not isinstance(names, list):
names = [names]
if modname == '__pyjamas__':
for name, assname in names:
if not hasattr(__Pyjamas__, name):
raise self.TranslationError(
"ImportError: cannot import name %s from %s" % (
name, modname,
),
)
to_js = getattr(__Pyjamas__, name)(self)
self.add_name(name, to_js=to_js, force=True)
return
if modname == '__javascript__':
for name, assname in names:
if assname is None:
assname = name
self.add_name(assname, to_js=name, force=True)
return
if modname == '__future__':
for name, assname in names:
imp = 'import_%s' % name
imp = getattr(self.__future__, imp, None)
if imp is None:
raise self.TranslationError(
"SyntaxError: future feature %s is not defined" % (
name,
),
)
imp(self)
level = modname
modname = modname.lstrip('.')
level = len(level) - len(modname)
if level == 0:
level = 'null'
else:
modname = ''
c_modname = self.add_const_str(modname)
assnames = []
fromlist = []
for name, assname in names:
c_name = self.add_const_str(name)
fromlist.append(c_name)
if assname is None:
assnames.append([c_name, 'null'])
assname = name
else:
c_assname = self.add_const_str(assname)
assnames.append([c_name, c_assname])
self.add_name(assname)
self.add_import(modname, [name])
if assnames:
assnames = "[%s]" % '], ['.join(["%s, %s" % (i, j) for (i, j) in assnames])
else:
assnames = ''
fromlist = ', '.join(fromlist)
jsvars.update(
modname=c_modname,
fromlist=fromlist,
assnames=assnames,
level=level,
)
imports.append("""\
%(builtin)s['_import']([%(assnames)s], %(modname)s, %(globals)s, %(locals)s, [%(fromlist)s], %(level)s);""" % jsvars)
return "\n".join(imports)
def node_import_name(self, node):
jsvars = self.jsvars.copy()
imports = []
for child in node.children[1:]:
if isinstance(child, Leaf):
imp = child.value
else:
imp = self.dispatch(child)
if not isinstance(imp, list):
imp = [imp]
if not isinstance(imp[0], Import):
imp = [Import(imp)]
for i in imp:
self.add_name(i.assname)
self.add_import(i.modname, None)
c_modname = self.add_const_str(i.modname)
c_assname = self.add_const_str(i.assname)
assnames = "%s, %s" % (c_modname, c_assname)
dst = self.get_jsname(i.assname)
jsvars.update(
dst=dst,
modname=c_modname,
assname=i.assname,
assnames=assnames,
)
imports.append("""\
%(dst)s = %(builtin)s['_import']([], %(modname)s, %(globals)s, %(locals)s, null, null);""" % jsvars)
return "\n".join(imports)
def node_import_stmt(self, node):
self.not_implemented(node)
def node_lambdef(self, node):
return self.node_funcdef(node, is_lambda=True)
def node_listmaker(self, node, cls='list'):
items = []
for child in node.children:
if isinstance(child, Leaf):
if child.value != ',':
items.append(self.dispatch(child))
elif type_repr(child.type) == 'comp_for':
# list comprehension
# create a for_stmt lookalike
simple_stmt = self.create_call_stmt(
'a.append',
[node.children[0]],
subst_base='comp$',
)
childs = child.children.__iter__()
base_node = comp_node = child
comp_node.type = sym_type('for_stmt')
comp_node.children = []
child = childs.next()
try:
while True:
while isinstance(child, Leaf) or \
not type_repr(child.type) in ['comp_if', 'comp_for']:
comp_node.append_child(child)
child = childs.next()
childs = child.children.__iter__()
comp_node.append_child(Leaf(11, ':'))
comp_node.append_child(child)
comp_node = child
comp_node.children = []
if type_repr(comp_node.type) == 'comp_for':
comp_node.type = sym_type('for_stmt')
else:
comp_node.type = sym_type('if_stmt')
child = childs.next()
except StopIteration:
pass
comp_node.append_child(Leaf(11, ':'))
comp_node.append_child(simple_stmt)
lines = self.lines
self.lines = []
self.node_for_stmt(base_node)
i = self.indent_level - 1
comp = '\n'.join(["%s%s" % ('+' * (j-i), s) for j, s in self.lines])
self.lines = lines
jsvars = self.jsvars.copy()
jsvars.update(locals())
retval = 'comp$'
if cls != 'list':
retval = "%(module)s['$new'](%(builtin)s['%(cls)s'], comp$)" % jsvars
jsvars.update(locals())
return """\
(function() {
+ var comp$ = %(module)s['$new'](%(builtin)s['list'], []);
%(comp)s
+ return %(retval)s;
})()""" % jsvars
else:
items.append(self.dispatch(child))
return items
def node_not_test(self, node, jsbool=False, name=None):
jsvars = self.jsvars.copy()
if len(node.children) == 2:
self.assert_value(node, node.children[0].value, 'not')
value = self.dispatch(node.children[1])
jsvars.update(locals())
if jsbool:
return "%(builtin)s['test_not'](%(value)s)" % jsvars
return "%(builtin)s['op_not'](%(value)s)" % jsvars
self.not_implemented(node)
def node_old_lambdef(self, node):
self.not_implemented(node)
def node_old_test(self, node):
self.not_implemented(node)
def node_or_test(self, node, jsbool=False, name=None):
jsvars = self.jsvars.copy()
args = []
childs = node.children.__iter__()
if jsbool:
name = None
elif name is None:
name = self.get_tmp_jsname('or$')
self.add_lines("var %s" % name)
args.append(self.get_test(childs.next(), name))
try:
while True:
self.assert_value(node, childs.next().value, 'or')
args.append(self.get_test(childs.next(), name))
except StopIteration:
pass
test = ' || '.join(args)
if jsbool:
return test
return '(%s ? %s : %s)' % (test, name, name)
def node_parameters(self, node):
self.assert_value(node, node.children[0].value, '(')
self.assert_value(node, node.children[-1].value, ')')
assert len(node.children) <= 3
if len(node.children) == 2:
return Parameters([], None, None, None)
if isinstance(node.children[1], Leaf):
args = node.children[1].value
else:
args = self.dispatch(node.children[1])
if isinstance(args, basestring):
return Parameters([args], None, None, None)
return self.dispatch(node.children[1])
def node_pass_stmt(self, node):
self.not_implemented(node)
def node_power(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
left = self.dispatch(childs.next())
if isinstance(node.children[1], Leaf):
if node.children[1].value == '**':
assert len(node.children) == 3
jsvars.update(
left=self.dispatch(node.children[0]),
right=self.dispatch(node.children[2]),
)
return "%(builtin)s['op_pow'](%(left)s, %(right)s)" % jsvars
elif isinstance(node.children[1], Node):
attrs = []
lineno = self.track_lineno(node)
for child in node.children[1:]:
if isinstance(child, Leaf) and \
child.value == '**':
assert child is node.children[-2]
jsvars.update(
left=left,
right=self.dispatch(node.children[-1]),
)
return "%(builtin)s['op_pow'](%(left)s, %(right)s)" % jsvars
trailer = self.dispatch(child)
if isinstance(trailer, Attribute):
assert not isinstance(left, __Pyjamas__.__Pyjamas__), node
attrs.append(trailer.name)
elif isinstance(trailer, Slice):
assert not isinstance(left, __Pyjamas__.__Pyjamas__), node
assign = self.assign_state is not False and \
child is node.children[-1]
if assign is False:
method = "'getslice'"
else:
method = "'setslice'"
if len(trailer.items) == 1:
if assign is False:
method = '%(getitem)s' % jsvars
else:
method = "'setitem'" % jsvars
args = trailer.items[0]
elif len(trailer.items) == 2:
args = list(trailer.items)
if args[0] == None:
args[0] = self.add_const_int(0)
if args[1] == None:
args[1] = self.add_const_int(2147483647)
args = "[%s]" % ', '.join(str(i) for i in args)
else:
args = []
for a in trailer.items:
if a is None:
args.append(jsvars['None'])
else:
args.append(a)
args = ', '.join(args)
jsvars.update(args=args)
args = "%(fcall)s(%(module)s, null, %(builtin)s['slice'], null, %(args)s)" % jsvars
if attrs:
attrs = ', '.join(attrs)
jsvars.update(attrs=attrs, left=left)
left = """\
%(getattr)s(%(left)s, [%(attrs)s])""" % jsvars
attrs = []
jsvars.update(locals())
if self.assign_state is False:
left = """\
%(builtin)s[%(method)s](%(left)s, %(args)s)""" % jsvars
else:
jsvars['what'] = self.assign_state
left = """\
%(builtin)s[%(method)s](%(left)s, %(args)s, %(what)s)""" % jsvars
elif isinstance(trailer, Parameters):
params = trailer
args = ''
if params.args:
args += ', ' + ', '.join([str(a) for a in params.args])
star_args = params.star_args
dstar_args = params.dstar_args
named_args = params.named_args
extended = False
if star_args or dstar_args or named_args:
extended = True
if star_args is None:
star_args = 'null'
if dstar_args is None:
dstar_args = 'null'
if named_args is None:
named_args = 'null'
else:
named_args = ', '.join([
'%r: %s' % (k, v) for k,v in named_args.iteritems()
])
named_args = "{%s}" % named_args
if isinstance(left, __Pyjamas__.__Pyjamas__):
if params.named_args is None:
left = Code(left.js(self, *params.args), self.get_lineno(node))
else:
left = Code(left.js(self, *params.args, **params.named_args), self.get_lineno(node))
elif not attrs:
jsvars.update(locals())
if star_args is None:
left = """\
%(fcall)s(%(module)s, %(lineno)s, %(left)s, null%(args)s)""" % jsvars
else:
left = """\
%(fcallext)s(%(module)s, %(lineno)s, %(left)s, null%(args)s, %(star_args)s, %(dstar_args)s, %(named_args)s)""" % jsvars
else:
attrs = ', '.join(attrs)
jsvars.update(locals())
if star_args is None:
left = """\
%(mcall)s(%(module)s, %(lineno)s, %(left)s, [%(attrs)s]%(args)s)""" % jsvars
else:
left = """\
%(mcallext)s(%(module)s, %(lineno)s, %(left)s, [%(attrs)s]%(args)s, %(star_args)s, %(dstar_args)s, %(named_args)s)""" % jsvars
attrs = []
if attrs:
attrs = ', '.join(attrs)
if self.assign_state is False:
jsvars.update(locals())
return Code("""\
%(getattr)s(%(left)s, [%(attrs)s])""" % jsvars, self.get_lineno(node))
else:
value = self.assign_state
jsvars.update(locals())
return Code("""\
%(setattr)s(%(left)s, [%(attrs)s], %(value)s)""" % jsvars, self.get_lineno(node))
return Code(left, self.get_lineno(node))
self.not_implemented(node)
def node_print_stmt(self, node):
if self.options.print_statements is False:
return
jsvars = self.jsvars.copy()
self.assert_value(node, node.children[0].value, 'print')
args = []
for child in node.children[1:]:
if isinstance(child, Leaf) and \
child.value == ',':
continue
arg = self.dispatch(child)
            if arg is not None:
args.append(arg)
newline = 'true'
if isinstance(node.children[-1], Leaf) and \
node.children[-1].value == ',':
newline = 'false'
args = ', '.join([str(i) for i in args])
jsvars.update(locals())
return """\
%(builtin)s['printFunc']([%(args)s], %(newline)s)""" % jsvars
def node_raise_stmt(self, node):
jsvars = self.jsvars.copy()
args = []
for child in node.children[1:]:
if isinstance(child, Leaf) and child.value == ',':
continue
args.append(self.dispatch(child))
args = ', '.join([str(a) for a in args])
jsvars.update(locals())
return "return %(builtin)s['raise'](%(args)s)" % jsvars
def node_return_stmt(self, node):
assert len(node.children) == 2
value = self.dispatch(node.children[1])
if isinstance(value, list):
jsvars = self.jsvars.copy()
items = ', '.join([str(i) for i in value])
jsvars.update(locals())
return "return %(tuple)s([%(items)s])" % jsvars
return "return %s" % value
def node_shift_expr(self, node):
return self.get_bit_expr(node)
def node_simple_stmt(self, node):
for child in node.children:
if isinstance(child, Leaf):
if child.value in ['break', 'continue']:
self.add_lines('%s;' % child.value, self.get_lineno(child))
continue
code = self.dispatch(child)
if isinstance(code, Code):
code = code.code
            if code is not None:
                code = str(code)
                if code[-1] not in [';', '}']:
                    code += ';'
                self.add_lines(code, self.get_lineno(child))
#self.add_lines(code)
def node_single_input(self, node):
self.not_implemented(node)
def node_sliceop(self, node):
assert len(node.children) == 2
self.assert_value(node, node.children[0].value, ':')
return self.dispatch(node.children[1])
def node_small_stmt(self, node):
self.not_implemented(node)
def node_star_expr(self, node):
self.not_implemented(node)
def node_stmt(self, node):
self.not_implemented(node)
def node_subscript(self, node):
slice = [None]
for child in node.children:
if isinstance(child, Leaf) and child.value == ':':
slice.append(None)
continue
slice[-1] = self.dispatch(child)
return Slice(tuple(slice))
def node_subscriptlist(self, node):
# d = {}; d[1,2] = 1
jsvars = self.jsvars.copy()
args = []
childs = node.children.__iter__()
args.append(self.dispatch(childs.next()))
try:
while True:
self.assert_value(node, childs.next().value, ',')
args.append(self.dispatch(childs.next()))
except StopIteration:
pass
args = ', '.join([str(i) for i in args])
jsvars.update(locals())
return "%(tuple)s([%(args)s])" % jsvars
def node_suite(self, node):
for child in node.children:
if isinstance(child, Node):
self.dispatch(child)
def node_term(self, node):
# x * y / x
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
left = self.dispatch(childs.next())
try:
while True:
op = childs.next().value
right = self.dispatch(childs.next())
op = self.op_names2.get(op, None)
if op is None:
self.not_implemented(node)
jsvars.update(left=left, op=op, right=right)
left = "%(builtin)s[%(op)r](%(left)s, %(right)s)" % jsvars
except StopIteration:
pass
return left
def node_test(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
left = self.dispatch(childs.next())
self.assert_value(node, childs.next().value, 'if')
test = self.get_test(childs.next())
self.assert_value(node, childs.next().value, 'else')
right = self.dispatch(childs.next())
return '%s ? %s : %s' % (test, left, right)
def node_testlist(self, node):
items = []
childs = node.children.__iter__()
try:
while True:
items.append(self.dispatch(childs.next()))
self.assert_value(node, childs.next().value, ',')
except StopIteration:
pass
return items
def node_testlist1(self, node):
self.not_implemented(node)
def node_testlist_gexp(self, node):
items = []
for child in node.children:
if isinstance(child, Leaf):
if child.value == ',':
continue
else:
if type_repr(child.type) == 'comp_for':
return self.node_listmaker(node, cls='tuple')
items.append(self.dispatch(child))
return items
def node_testlist_safe(self, node):
self.not_implemented(node)
def node_testlist_star_expr(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
items = []
try:
while True:
items.append(self.dispatch(childs.next()))
self.assert_value(node, childs.next().value, ',')
except StopIteration:
pass
cls = 'tuple'
items = ', '.join([str(i) for i in items])
jsvars.update(locals())
return "%(module)s['$new'](%(builtin)s['%(cls)s'], [%(items)s])" % jsvars
def node_tfpdef(self, node):
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, '(')
tpl = self.dispatch(childs.next())
self.assert_value(node, childs.next().value, ')')
return tuple(tpl)
def node_tfplist(self, node):
childs = node.children.__iter__()
tpl = []
try:
while True:
child = childs.next()
if isinstance(child, Leaf):
tpl.append(child.value)
else:
tpl.append(self.dispatch(child))
self.assert_value(node, childs.next().value, ',')
except StopIteration:
pass
return tpl
def node_tname(self, node):
self.not_implemented(node)
def node_trailer(self, node):
if node.children[0].value == '(':
args = []
star_args = None
dstar_args = None
defaults = None
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, '(')
while True:
child = childs.next()
if isinstance(child, Leaf):
if child.value == ')':
break
args.append(self.dispatch(child))
else:
arg = self.dispatch(child)
if isinstance(arg, list):
args += arg
else:
args.append(arg)
return Parameters(args, star_args, dstar_args, defaults)
elif node.children[0].value == '.':
assert len(node.children) == 2
if isinstance(node.children[1], Leaf):
return Attribute(repr(node.children[1].value))
return Attribute(self.dispatch(node.children[1]))
elif node.children[0].value == '[':
slice = []
assert len(node.children) == 3
if isinstance(node.children[1], Leaf) and \
node.children[1].value == ':':
return Slice((None, None))
slice = self.dispatch(node.children[1])
if isinstance(slice, Slice):
return slice
return Slice((slice, ))
self.not_implemented(node)
def node_try_stmt(self, node):
jsvars = self.jsvars.copy()
childs = node.children.__iter__()
# syntax ensures fixed order
# try:
# except-clauses:
# except:
# else:
# finally
self.assert_value(node, childs.next().value, 'try')
self.assert_value(node, childs.next().value, ':')
_try = childs.next()
_except_clauses = []
_except = None
_else = None
_finally = None
try:
while True:
child = childs.next()
if isinstance(child, Leaf):
if child.value == 'except':
self.assert_value(node, childs.next().value, ':')
_except = childs.next()
elif child.value == 'else':
self.assert_value(node, childs.next().value, ':')
_else = childs.next()
elif child.value == 'finally':
self.assert_value(node, childs.next().value, ':')
_finally = childs.next()
else:
raise NotImplementedError(repr(node))
elif type_repr(child.type) == 'except_clause':
self.assert_value(node, childs.next().value, ':')
_except_clauses.append((child, childs.next()))
else:
raise NotImplementedError(repr(node))
except StopIteration:
pass
track_len = self.get_tmp_jsname('track$len')
self.add_lines("var %s = $pyjs.trackstack.length;" % track_len)
self.add_lines("try {", node.children[0].lineno)
self.indent()
self.dispatch(_try)
if _else is not None:
self.add_lines("throw %(try_else)s" % jsvars)
self.dedent()
self.add_lines("} catch (%(catch)s) {" % jsvars)
self.indent()
self.add_lines("var %(catchclass)s = %(catch)s['__class__'];" % jsvars)
self.add_lines(
"$pyjs.trackstack.splice(%s, $pyjs.trackstack.length);" % (
track_len,
),
)
elseif = "if"
jsvars.update(locals())
if _else is not None:
self.add_lines("if (%(catchclass)s === %(try_else)s['__class__']) {" % jsvars)
self.indent()
self.dispatch(_else)
self.dedent()
elseif = "} else if"
for clause, suite in _except_clauses:
# See node_except_clause
classes, varname = self.dispatch(clause)
jsvars['classes'] = ', '.join([str(i) for i in classes])
jsvars['elseif'] = elseif
self.add_lines("""%(elseif)s(%(builtin)s['_issubclass'](%(catchclass)s, [%(classes)s])) {""" % jsvars)
self.indent()
if varname:
self.add_lines("%s = %s;" % (varname, jsvars['catch']))
self.dispatch(suite)
self.dedent()
elseif = "} else if"
if elseif != "if":
self.add_lines("} else {")
self.indent()
if _except is None:
self.add_lines("throw %(catch)s;" % jsvars)
else:
self.dispatch(_except)
if elseif != "if":
self.dedent()
self.add_lines("}")
if _finally is not None:
self.dedent()
self.add_lines("} finally {")
self.indent()
self.dispatch(_finally)
self.dedent()
self.add_lines("}")
def node_typedargslist(self, node):
childs = node.children.__iter__()
args = []
star_args = None
dstar_args = None
defaults = []
try:
while True:
child = childs.next()
if isinstance(child, Leaf):
if child.value == '*':
star_args = childs.next().value
elif child.value == '**':
dstar_args = childs.next().value
elif child.value == '=':
defaults.append(childs.next())
elif child.value != ',':
args.append(child.value)
elif type_repr(child.type) == 'tfpdef':
args.append(self.dispatch(child))
else:
self.not_implemented(node)
except StopIteration:
pass
return Parameters(args, star_args, dstar_args, defaults)
def node_varargslist(self, node):
# Used from node_lambdef:
# f = lambda a, *args, **kwargs: tuple([a, args, kwargs])
return self.node_typedargslist(node)
def node_vfpdef(self, node):
self.not_implemented(node)
def node_vfplist(self, node):
self.not_implemented(node)
def node_vname(self, node):
self.not_implemented(node)
def node_while_stmt(self, node):
jsvars = self.jsvars.copy()
inloop = self.inloop
self.inloop += 1
childs = node.children.__iter__()
self.assert_value(node, childs.next().value, 'while')
test = self.get_test(childs.next())
self.assert_value(node, childs.next().value, ':')
if len(node.children) > 4 and node.children[4].value == 'else':
loop = self.get_tmp_jsname('while$')
loopdecl = 'var %s = false;\n' % loop
loopass = '\n+%s = true;' % loop
else:
loop = None
loopdecl = loopass = ''
jsvars.update(locals())
self.add_lines("""\
%(loopdecl)swhile (%(test)s) {%(loopass)s""" % jsvars, node.children[0].lineno)
self.indent()
self.dispatch(childs.next())
self.dedent()
self.add_lines("};")
if loop is not None:
self.assert_value(node, childs.next().value, 'else')
self.assert_value(node, childs.next().value, ':')
self.add_lines("""\
if (!(%(loop)s).valueOf()) {""" % jsvars, )
self.indent()
self.dispatch(childs.next())
self.dedent()
self.add_lines("};")
self.inloop -= 1
def node_with_item(self, node):
self.not_implemented(node)
def node_with_stmt(self, node):
self.not_implemented(node)
def node_with_var(self, node):
self.not_implemented(node)
def node_xor_expr(self, node):
return self.get_bit_expr(node)
def node_yield_expr(self, node):
return
self.not_implemented(node)
def node_yield_stmt(self, node):
return
self.not_implemented(node)
def leaftype_name(self, leaf):
# type 1
if leaf.value.find('$') >= 0:
# this is an internal javascript variable
return leaf.value
self.add_name(leaf.value, leaf.lineno, force=False)
return self.get_jsname(leaf.value)
def leaftype_number(self, leaf):
# type 2
if re_oct_int.match(leaf.value):
i = str(int(leaf.value, 8))
return self.add_const_int(i)
if re_hex_int.match(leaf.value):
i = str(int(leaf.value, 16))
return self.add_const_int(i)
if re_int.match(leaf.value):
return self.add_const_int(leaf.value)
        if re_oct_long.match(leaf.value):
            i = str(long(leaf.value, 8))
            return self.add_const_long(i)
if re_hex_long.match(leaf.value):
i = str(long(leaf.value, 16))
return self.add_const_long(i)
if re_long.match(leaf.value):
return self.add_const_long(leaf.value)
return self.add_const_float(leaf.value)
def substitute_js_chars(self, m):
c = m.group(0)
i = ord(c)
if i < 32:
return '\\x%02X' % i
if c in ["'", '"', '<', '>', '\\']:
return '\\x%02X' % i
return c
def leaftype_str(self, leaf):
# type 3
s = leaf.value
return self.add_const_str(eval(s))
def leaf_False(self, leaf):
return self.jsvars['False']
def leaf_None(self, leaf):
return self.jsvars['None']
def leaf_True(self, leaf):
return self.jsvars['True']
def leaf_pass(self, leaf):
return None
def leaf_return(self, leaf):
return 'return null'
###
# External hooks as in translator_proto
###
from options import (all_compile_options, add_compile_options,
get_compile_options, debug_options, speed_options,
pythonic_options)
if os.environ.has_key('PYJS_SYSPATH'):
sys.path[0:0] = [os.environ['PYJS_SYSPATH']]
import pyjs
if pyjs.pyjspth is None:
LIBRARY_PATH = os.path.abspath(os.path.dirname(__file__))
else:
LIBRARY_PATH = os.path.join(pyjs.pyjspth, "pyjs", "src", "pyjs")
TranslationError = Translator.TranslationError
def translate(compiler, sources, output_file, module_name=None, **kw):
global TranslationError
kw = dict(all_compile_options, **kw)
list_imports = kw.get('list_imports', False)
sources = map(os.path.abspath, sources)
output_file = os.path.abspath(output_file)
if not module_name:
module_name, extension = os.path.splitext(os.path.basename(sources[0]))
translator = Translator(sources[0], module_name, kw)
TranslationError = translator.TranslationError
trees = []
tree = None
for src in sources:
current_tree = translator.ast_tree_creator(src)
flags = set()
f = file(src)
for l in f:
if l.startswith('#@PYJS_'):
flags.add(l.strip()[7:])
f.close()
if tree:
tree = translator.tree_merge(tree, current_tree, flags)
else:
tree = current_tree
    #XXX: if we have an override, the source file and the tree are not the same!
f = file(sources[0], "r")
src = f.read()
f.close()
if list_imports:
# TODO: use ImportVisitor instead of Translator
translator.dispatch_file(tree)
return translator.imported_modules.keys(), translator.imported_js.keys()
v = ImportVisitor(module_name)
compiler.walk(tree, v)
return v.imported_modules, v.imported_js
translator.dispatch_file(tree)
if output_file == '-':
output = sys.stdout
else:
output = file(output_file, 'w')
    output.write(translator.get_javascript())
    if output is not sys.stdout:
        output.close()
return sorted(translator.imported_modules.keys()), sorted(translator.imported_js.keys())
class ImportVisitor(object):
def __init__(self, module_name):
self.module_name = module_name
self.imported_modules = []
self.imported_js = []
def add_imported_module(self, importName):
if not importName in self.imported_modules:
self.imported_modules.append(importName)
def visitModule(self, node):
self.visit(node.node)
def visitImport(self, node):
self._doImport(node.names)
def _doImport(self, names):
for importName, importAs in names:
if importName == '__pyjamas__':
continue
if importName.endswith(".js"):
continue
imp.add_imported_js(importName)
continue
self.add_imported_module(importName)
def visitFrom(self, node):
if node.modname == '__pyjamas__':
return
if node.modname == '__javascript__':
return
# XXX: hack for in-function checking, we should have another
# object to check our scope
absPath = False
modname = node.modname
if hasattr(node, 'level') and node.level > 0:
absPath = True
modname = self.module_name.split('.')
level = node.level
if len(modname) < level:
raise TranslationError(
"Attempted relative import beyond toplevel package",
node, self.module_name)
if node.modname != '':
level += 1
if level > 1:
modname = '.'.join(modname[:-(node.level-1)])
else:
modname = self.module_name
if node.modname != '':
modname += '.' + node.modname
if modname[0] == '.':
modname = modname[1:]
for name in node.names:
sub = modname + '.' + name[0]
ass_name = name[1] or name[0]
self._doImport(((sub, ass_name),))
if __name__ == "__main__":
translator = Translator(sys.argv[1], None, {})
translator.tree = translator.ast_tree_creator()
translator.dispatch_file(translator.tree)
print translator.get_javascript()
| anandology/pyjamas | pyjs/src/pyjs/translator_dict.py | Python | apache-2.0 | 117,085 | 0.001196 |
""" FileDialogDelegateQt.py: Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
import os.path
try:
from PyQt5.QtCore import Qt, QT_VERSION_STR
from PyQt5.QtWidgets import QStyledItemDelegate, QFileDialog
except ImportError:
try:
from PyQt4.QtCore import Qt, QT_VERSION_STR
from PyQt4.QtGui import QStyledItemDelegate, QFileDialog
except ImportError:
raise ImportError("FileDialogDelegateQt: Requires PyQt5 or PyQt4.")
__author__ = "Marcel Goldschen-Ohm <marcel.goldschen@gmail.com>"
class FileDialogDelegateQt(QStyledItemDelegate):
""" Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
def __init__(self, parent=None):
QStyledItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
""" Instead of creating an editor, just popup a modal file dialog
and set the model data to the selected file name, if any.
"""
pathToFileName = ""
if QT_VERSION_STR[0] == '4':
pathToFileName = QFileDialog.getOpenFileName(None, "Open")
elif QT_VERSION_STR[0] == '5':
pathToFileName, temp = QFileDialog.getOpenFileName(None, "Open")
pathToFileName = str(pathToFileName) # QString ==> str
if len(pathToFileName):
index.model().setData(index, pathToFileName, Qt.EditRole)
index.model().dataChanged.emit(index, index) # Tell model to update cell display.
return None
def displayText(self, value, locale):
""" Show file name without path.
"""
try:
if QT_VERSION_STR[0] == '4':
pathToFileName = str(value.toString()) # QVariant ==> str
elif QT_VERSION_STR[0] == '5':
pathToFileName = str(value)
path, fileName = os.path.split(pathToFileName)
return fileName
        except Exception:
            return ""
| marcel-goldschen-ohm/ModelViewPyQt | FileDialogDelegateQt.py | Python | mit | 2,023 | 0.001483 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file has been created by ARSF Data Analysis Node and
# is licensed under the MIT Licence. A copy of this
# licence is available to download with this file.
#
# Author: Robin Wilson
# Created: 2015-11-16
import sys
import numpy as np
import pandas as pd
# Python 2/3 imports
try:
from StringIO import StringIO
except ImportError:
if sys.version_info[0] >= 3:
from io import StringIO
else:
raise
from . import spectra_reader
class DARTFormat(spectra_reader.SpectraReader):
"""
Class to read spectra from DART format files
"""
def get_spectra(self, filename):
"""
Extract spectra from a DART format file
Requires:
        * filename - path to the DART format file to read
Returns:
* Spectra object with values, radiance, pixel and line
"""
f = open(filename, 'r')
s = StringIO()
within_comment = False
        while True:
            try:
                line = next(f)  # builtin next() works on both Python 2 and 3 file objects
            except StopIteration:
                break
if "*" in line and within_comment:
within_comment = False
continue
elif "*" in line and not within_comment:
within_comment = True
if not within_comment and not line.isspace():
s.write(line)
        f.close()
        s.seek(0)
df = pd.read_table(s, header=None, names=["wavelength", "reflectance",
"refractive_index", "A", "Alpha",
"wHapke", "AHapkeSpec",
"AlphaHapkeSpec", "TDirect",
"TDiffuse"])
df.reflectance = df.reflectance / 100
wavelengths = np.array(df.wavelength)
reflectance = np.array(df.reflectance)
self.spectra.file_name = filename
self.spectra.wavelengths = wavelengths
self.spectra.values = reflectance
self.spectra.pixel = None
self.spectra.line = None
self.spectra.latitude = None
self.spectra.longitude = None
self.spectra.wavelength_units = "nm"
self.spectra.value_units = "reflectance"
self.spectra.value_scaling = 1
return self.spectra
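if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module (run with
    # ``python -m PySpectra.dart`` so the relative import above resolves).
    # Assumptions: SpectraReader's default constructor takes no arguments and sets up
    # self.spectra, and "coeff_diff.txt" is a placeholder for a real DART file.
    reader = DARTFormat()
    spectra = reader.get_spectra("coeff_diff.txt")
    print("%d reflectance samples read from %s" % (len(spectra.values), spectra.file_name))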
| pmlrsg/PySpectra | PySpectra/dart.py | Python | mit | 2,368 | 0.000845 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
"""NatGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NatGateway":
"""Gets the specified nat gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> "_models.NatGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NatGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.NatGateway"]:
"""Creates or updates a nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to the create or update nat gateway operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.NatGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NatGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.NatGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NatGateway":
"""Updates nat gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to update nat gateway tags.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all the Nat Gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all nat gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways'} # type: ignore
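if __name__ == "__main__":
    # Illustrative usage sketch, not part of the generated module. Assumptions: the
    # async NetworkManagementClient for this API version exposes this operations group
    # as ``nat_gateways``, and the subscription / resource names below are placeholders.
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2020_04_01.aio import NetworkManagementClient
    async def _demo():
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, "<subscription-id>") as client:
                # Enumerate every NAT gateway in one resource group.
                async for gw in client.nat_gateways.list("<resource-group>"):
                    print(gw.name, gw.provisioning_state)
                # Long-running delete returns an AsyncLROPoller; wait for completion.
                poller = await client.nat_gateways.begin_delete("<resource-group>", "<nat-gateway>")
                await poller.result()
    asyncio.run(_demo())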
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_nat_gateways_operations.py | Python | mit | 26,918 | 0.004681 |
# -*- coding: utf-8 -*-
from ..internal.DeadCrypter import DeadCrypter
class Movie2KTo(DeadCrypter):
__name__ = "Movie2KTo"
__type__ = "crypter"
__version__ = "0.56"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?movie2k\.to/(.+)\.html'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Movie2k.to decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("4Christopher", "4Christopher@gmx.de")]
| TheBraveWarrior/pyload | module/plugins/crypter/Movie2KTo.py | Python | gpl-3.0 | 472 | 0 |
config = {
"interfaces": {
"google.devtools.clouderrorreporting.v1beta1.ReportErrorsService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"ReportErrorEvent": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
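# Illustrative note (added annotation, not part of the generated config): under
# the usual GAPIC retry semantics these retry_params mean an idempotent call is
# retried with delays of roughly 100 ms, 130 ms, 169 ms, ... (each delay
# multiplied by retry_delay_multiplier = 1.3 and capped at
# max_retry_delay_millis = 60 s), and the client gives up once
# total_timeout_millis (600 s) has elapsed.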
| jonparrott/google-cloud-python | error_reporting/google/cloud/errorreporting_v1beta1/gapic/report_errors_service_client_config.py | Python | apache-2.0 | 987 | 0 |
import random
rand = random.SystemRandom()
def rabinMiller(num):
    """Probabilistic Miller-Rabin primality test using 64 random witnesses."""
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False
    # Write num - 1 as 2**t * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for trials in range(64):
        a = rand.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            # Square v up to t - 1 times looking for num - 1; if it never
            # appears, a is a witness that num is composite.
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v ** 2) % num
    return True
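if __name__ == "__main__":
    # Quick illustrative check (not part of the original module). The test is
    # probabilistic: True means "almost certainly prime", False means composite.
    for candidate in (221, 7919, 2 ** 61 - 1):
        print(candidate, rabinMiller(candidate))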
| Fitzgibbons/Cryptograpy | rabinmiller.py | Python | mit | 526 | 0.001901 |
'''
salt.targeting
~~~~~~~~~~~~~~
'''
import logging
log = logging.getLogger(__name__)
from .parser import *
from .query import *
from .rules import *
from .subjects import *
#: defines minion targeting
minion_targeting = Query(default_rule=GlobRule)
minion_targeting.register(GlobRule, None, 'glob')
minion_targeting.register(GrainRule, 'G', 'grain')
minion_targeting.register(PillarRule, 'I', 'pillar')
minion_targeting.register(PCRERule, 'E', 'pcre')
minion_targeting.register(GrainPCRERule, 'P', 'grain_pcre')
minion_targeting.register(SubnetIPRule, 'S')
minion_targeting.register(ExselRule, 'X', 'exsel')
minion_targeting.register(LocalStoreRule, 'D')
minion_targeting.register(YahooRangeRule, 'R')
minion_targeting.register(ListEvaluator, 'L', 'list')
minion_targeting.register(NodeGroupEvaluator, 'N')
| johnnoone/salt-targeting | src/salt/targeting/__init__.py | Python | mit | 816 | 0.006127 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.messaging.notify import notifier
class NoOpDriver(notifier._Driver):
def notify(self, ctxt, message, priority):
pass
| citrix-openstack-build/oslo.messaging | oslo/messaging/notify/_impl_noop.py | Python | apache-2.0 | 811 | 0 |
# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from binascii import b2a_hex
import bitcoin.script
from bitcoin.script import countSigOps
from bitcoin.txn import Txn
from bitcoin.varlen import varlenEncode, varlenDecode
from collections import deque
from copy import deepcopy
from queue import Queue
import jsonrpc
import logging
from math import log
from merkletree import MerkleTree
from struct import pack
import threading
from time import sleep, time
import traceback
import config
_makeCoinbase = [0, 0]
def MakeBlockHeader(MRD):
(merkleRoot, merkleTree, coinbase, prevBlock, bits) = MRD[:5]
timestamp = pack('<L', int(time()))
hdr = b'\2\0\0\0' + prevBlock + merkleRoot + timestamp + bits + b'iolE'
return hdr
def assembleBlock(blkhdr, txlist):
payload = blkhdr
payload += varlenEncode(len(txlist))
for tx in txlist:
payload += tx.data
return payload
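# Illustrative layout of what MakeBlockHeader/assembleBlock produce (annotation
# added for clarity, not part of the original Eloipool code):
#    4 bytes  version         (b'\2\0\0\0' = block version 2, little endian)
#   32 bytes  previous block hash
#   32 bytes  merkle root
#    4 bytes  timestamp       (little-endian unix time)
#    4 bytes  bits            (compact difficulty target)
#    4 bytes  nonce           (placeholder b'iolE', rewritten by miners)
# followed by a varint transaction count and the serialized transactions.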
class merkleMaker(threading.Thread):
OldGMP = None
GBTCaps = [
'coinbasevalue',
'coinbase/append',
'coinbase',
'generation',
'time',
'transactions/remove',
'prevblock',
]
GBTReq = {
'capabilities': GBTCaps,
}
GMPReq = {
'capabilities': GBTCaps,
'tx': 'obj',
}
def __init__(self, *a, **k):
super().__init__(*a, **k)
self.daemon = True
self.logger = logging.getLogger('merkleMaker')
self.CoinbasePrefix = b'Mined by Multicoin.co'
self.CoinbaseAux = {}
self.isOverflowed = False
self.lastWarning = {}
self.MinimumTxnUpdateWait = 1
self.overflowed = 0
self.DifficultyChangeMod = 1
def _prepare(self):
self.access = jsonrpc.ServiceProxy(self.UpstreamURI)
self.MinimumTxnUpdateWait = 1
self.IdleSleepTime = 1
self.TxnUpdateRetryWait = 1
self.ready = False
self.readyCV = threading.Condition()
self.currentBlock = (None, None, None)
self.lastBlock = (None, None, None)
self.currentMerkleTree = None
self.merkleRoots = deque(maxlen=self.WorkQueueSizeRegular[1])
self.LowestMerkleRoots = self.WorkQueueSizeRegular[1]
if not hasattr(self, 'WorkQueueSizeClear'):
self.WorkQueueSizeClear = self.WorkQueueSizeLongpoll
self._MaxClearSize = max(self.WorkQueueSizeClear[1], self.WorkQueueSizeLongpoll[1])
self.clearMerkleRoots = Queue(self._MaxClearSize)
self.LowestClearMerkleRoots = self.WorkQueueSizeClear[1]
self.nextMerkleRoots = Queue(self._MaxClearSize)
if not hasattr(self, 'WarningDelay'):
self.WarningDelay = max(15, self.MinimumTxnUpdateWait * 2)
if not hasattr(self, 'WarningDelayTxnLongpoll'):
self.WarningDelayTxnLongpoll = self.WarningDelay
if not hasattr(self, 'WarningDelayMerkleUpdate'):
self.WarningDelayMerkleUpdate = self.WarningDelay
self.lastMerkleUpdate = 0
self.nextMerkleUpdate = 0
def createClearMerkleTree(self, height):
subsidy = self.access.getblocktemplate()['coinbasevalue']
cbtxn = self.makeCoinbaseTxn(subsidy, False)
cbtxn.assemble()
return MerkleTree([cbtxn])
def updateBlock(self, newBlock, height = None, bits = None, _HBH = None):
if newBlock == self.currentBlock[0]:
if height in (None, self.currentBlock[1]) and bits in (None, self.currentBlock[2]):
return
if not self.currentBlock[2] is None:
self.logger.error('Was working on block with wrong specs: %s (height: %d->%d; bits: %s->%s' % (
b2a_hex(newBlock[::-1]).decode('utf8'),
self.currentBlock[1],
height,
b2a_hex(self.currentBlock[2][::-1]).decode('utf8'),
b2a_hex(bits[::-1]).decode('utf8'),
))
# Old block is invalid
if self.currentBlock[0] != newBlock:
self.lastBlock = self.currentBlock
lastHeight = self.currentBlock[1]
if height is None:
height = self.currentBlock[1] + 1
if bits is None:
if height % self.DifficultyChangeMod == 1 or self.currentBlock[2] is None:
self.logger.warning('New block: %s (height %d; bits: UNKNOWN)' % (b2a_hex(newBlock[::-1]).decode('utf8'), height))
# Pretend to be 1 lower height, so we possibly retain nextMerkleRoots
self.currentBlock = (None, height - 1, None)
self.clearMerkleRoots = Queue(0)
self.merkleRoots.clear()
self.ready = False
return
else:
bits = self.currentBlock[2]
if _HBH is None:
_HBH = (b2a_hex(newBlock[::-1]).decode('utf8'), b2a_hex(bits[::-1]).decode('utf8'))
self.logger.info('New block: %s (height: %d; bits: %s)' % (_HBH[0], height, _HBH[1]))
self.currentBlock = (newBlock, height, bits)
if lastHeight != height:
# TODO: Perhaps reuse clear merkle trees more intelligently
if lastHeight == height - 1:
self.curClearMerkleTree = self.nextMerkleTree
self.clearMerkleRoots = self.nextMerkleRoots
self.logger.debug('Adopting next-height clear merkleroots :)')
else:
if lastHeight:
self.logger.warning('Change from height %d->%d; no longpoll merkleroots available!' % (lastHeight, height))
self.curClearMerkleTree = self.createClearMerkleTree(height)
self.clearMerkleRoots = Queue(self.WorkQueueSizeClear[1])
self.nextMerkleTree = self.createClearMerkleTree(height + 1)
self.nextMerkleRoots = Queue(self._MaxClearSize)
else:
self.logger.debug('Already using clear merkleroots for this height')
self.currentMerkleTree = self.curClearMerkleTree
self.merkleRoots.clear()
if not self.ready:
self.ready = True
with self.readyCV:
self.readyCV.notify_all()
self.needMerkle = 2
self.onBlockChange()
def _trimBlock(self, MP, txnlist, txninfo, floodn, msgf):
fee = txninfo[-1].get('fee', None)
if fee is None:
raise self._floodCritical(now, floodn, doin=msgf('fees unknown'))
if fee:
# FIXME: coinbasevalue is *not* guaranteed to exist here
MP['coinbasevalue'] -= fee
txnlist[-1:] = ()
txninfo[-1:] = ()
return True
# Aggressive "Power Of Two": Remove transactions even with fees to reach our goal
def _APOT(self, txninfopot, MP, POTInfo):
feeTxnsTrimmed = 0
feesTrimmed = 0
for txn in txninfopot:
if txn.get('fee') is None:
self._floodWarning(now, 'APOT-No-Fees', doin='Upstream didn\'t provide fee information required for aggressive POT', logf=self.logger.info)
return
if not txn['fee']:
continue
feesTrimmed += txn['fee']
feeTxnsTrimmed += 1
MP['coinbasevalue'] -= feesTrimmed
POTInfo[2] = [feeTxnsTrimmed, feesTrimmed]
self._floodWarning(now, 'POT-Trimming-Fees', doin='Aggressive POT trimming %d transactions with %d.%08d BTC total fees' % (feeTxnsTrimmed, feesTrimmed//100000000, feesTrimmed % 100000000), logf=self.logger.debug)
return True
def _makeBlockSafe(self, MP, txnlist, txninfo):
blocksize = sum(map(len, txnlist)) + 80
while blocksize > 934464: # 1 "MB" limit - 64 KB breathing room
txnsize = len(txnlist[-1])
self._trimBlock(MP, txnlist, txninfo, 'SizeLimit', lambda x: 'Making blocks over 1 MB size limit (%d bytes; %s)' % (blocksize, x))
blocksize -= txnsize
# NOTE: This check doesn't work at all without BIP22 transaction obj format
blocksigops = sum(a.get('sigops', 0) for a in txninfo)
while blocksigops > 19488: # 20k limit - 0x200 breathing room
txnsigops = txninfo[-1]['sigops']
self._trimBlock(MP, txnlist, txninfo, 'SigOpLimit', lambda x: 'Making blocks over 20k SigOp limit (%d; %s)' % (blocksigops, x))
blocksigops -= txnsigops
# Aim to produce blocks with "Power Of Two" transaction counts
# This helps avoid any chance of someone abusing CVE-2012-2459 with them
POTMode = getattr(self, 'POT', 0)
txncount = len(txnlist) + 1
if POTMode:
feetxncount = txncount
for i in range(txncount - 2, -1, -1):
if 'fee' not in txninfo[i] or txninfo[i]['fee']:
break
feetxncount -= 1
if getattr(self, 'Greedy', None):
# Aim to cut off extra zero-fee transactions on the end
# NOTE: not cutting out ones intermixed, in case of dependencies
idealtxncount = feetxncount
else:
idealtxncount = txncount
pot = 2**int(log(idealtxncount, 2))
POTInfo = MP['POTInfo'] = [[idealtxncount, feetxncount, txncount], [pot, None], None]
if pot < idealtxncount:
if pot * 2 <= txncount:
pot *= 2
elif pot >= feetxncount:
pass
elif POTMode > 1 and self._APOT(txninfo[pot-1:], MP, POTInfo):
# Trimmed even transactions with fees
pass
else:
pot = idealtxncount
self._floodWarning(now, 'Non-POT', doin='Making merkle tree with %d transactions (ideal: %d; max: %d)' % (pot, idealtxncount, txncount))
POTInfo[1][1] = pot
pot -= 1
txnlist[pot:] = ()
txninfo[pot:] = ()
def updateMerkleTree(self):
global now
self.logger.debug('Polling bitcoind for memorypool')
self.nextMerkleUpdate = now + self.TxnUpdateRetryWait
try:
# First, try BIP 22 standard getblocktemplate :)
MP = self.access.getblocktemplate(self.GBTReq)
self.OldGMP = False
except:
try:
# Failing that, give BIP 22 draft (2012-02 through 2012-07) getmemorypool a chance
MP = self.access.getmemorypool(self.GMPReq)
except:
try:
# Finally, fall back to bitcoind 0.5/0.6 getmemorypool
MP = self.access.getmemorypool()
except:
MP = False
if MP is False:
# This way, we get the error from the BIP22 call if the old one fails too
raise
# Pre-BIP22 server (bitcoind <0.7 or Eloipool <20120513)
if not self.OldGMP:
self.OldGMP = True
self.logger.warning('Upstream server is not BIP 22 compatible')
oMP = deepcopy(MP)
prevBlock = bytes.fromhex(MP['previousblockhash'])[::-1]
if 'height' in MP:
height = MP['height']
else:
height = self.access.getinfo()['blocks'] + 1
bits = bytes.fromhex(MP['bits'])[::-1]
if (prevBlock, height, bits) != self.currentBlock:
self.updateBlock(prevBlock, height, bits, _HBH=(MP['previousblockhash'], MP['bits']))
txnlist = MP['transactions']
if len(txnlist) and isinstance(txnlist[0], dict):
txninfo = txnlist
txnlist = tuple(a['data'] for a in txnlist)
txninfo.insert(0, {
})
elif 'transactionfees' in MP:
# Backward compatibility with pre-BIP22 gmp_fees branch
txninfo = [{'fee':a} for a in MP['transactionfees']]
else:
# Backward compatibility with pre-BIP22 hex-only (bitcoind <0.7, Eloipool <future)
txninfo = [{}] * len(txnlist)
# TODO: cache Txn or at least txid from previous merkle roots?
txnlist = [a for a in map(bytes.fromhex, txnlist)]
self._makeBlockSafe(MP, txnlist, txninfo)
cbtxn = self.makeCoinbaseTxn(MP['coinbasevalue'])
cbtxn.setCoinbase(b'\0\0')
cbtxn.assemble()
txnlist.insert(0, cbtxn.data)
txnlist = [a for a in map(Txn, txnlist[1:])]
txnlist.insert(0, cbtxn)
txnlist = list(txnlist)
newMerkleTree = MerkleTree(txnlist)
if newMerkleTree.merkleRoot() != self.currentMerkleTree.merkleRoot():
newMerkleTree.POTInfo = MP.get('POTInfo')
newMerkleTree.oMP = oMP
if (not self.OldGMP) and 'proposal' in MP.get('capabilities', ()):
(prevBlock, height, bits) = self.currentBlock
coinbase = self.makeCoinbase(height=height)
cbtxn.setCoinbase(coinbase)
cbtxn.assemble()
merkleRoot = newMerkleTree.merkleRoot()
MRD = (merkleRoot, newMerkleTree, coinbase, prevBlock, bits)
blkhdr = MakeBlockHeader(MRD)
data = assembleBlock(blkhdr, txnlist)
propose = self.access.getblocktemplate({
"mode": "proposal",
"data": b2a_hex(data).decode('utf8'),
})
if propose is None:
self.logger.debug('Updating merkle tree (upstream accepted proposal)')
self.currentMerkleTree = newMerkleTree
else:
self.RejectedProposal = (newMerkleTree, propose)
try:
propose = propose['reject-reason']
except:
pass
self.logger.error('Upstream rejected proposed block: %s' % (propose,))
else:
self.logger.debug('Updating merkle tree (no proposal support)')
self.currentMerkleTree = newMerkleTree
self.lastMerkleUpdate = now
self.nextMerkleUpdate = now + self.MinimumTxnUpdateWait
if self.needMerkle == 2:
self.needMerkle = 1
self.needMerkleSince = now
def makeCoinbase(self, height):
now = int(time())
if now > _makeCoinbase[0]:
_makeCoinbase[0] = now
_makeCoinbase[1] = 0
else:
_makeCoinbase[1] += 1
rv = self.CoinbasePrefix
rv += pack('>L', now) + pack('>Q', _makeCoinbase[1]).lstrip(b'\0')
# NOTE: Not using varlenEncode, since this is always guaranteed to be < 100
rv = bytes( (len(rv),) ) + rv
for v in self.CoinbaseAux.values():
rv += v
if len(rv) > 95:
t = time()
if self.overflowed < t - 300:
self.logger.warning('Overflowing coinbase data! %d bytes long' % (len(rv),))
self.overflowed = t
self.isOverflowed = True
rv = rv[:95]
else:
self.isOverflowed = False
rv = bitcoin.script.encodeUNum(height) + rv
return rv
def makeMerkleRoot(self, merkleTree, height):
cbtxn = merkleTree.data[0]
cb = self.makeCoinbase(height=height)
cbtxn.setCoinbase(cb)
cbtxn.assemble()
merkleRoot = merkleTree.merkleRoot()
return (merkleRoot, merkleTree, cb)
_doing_last = None
def _doing(self, what):
if self._doing_last == what:
self._doing_i += 1
return
global now
if self._doing_last:
self.logger.debug("Switching from (%4dx in %5.3f seconds) %s => %s" % (self._doing_i, now - self._doing_s, self._doing_last, what))
self._doing_last = what
self._doing_i = 1
self._doing_s = now
def _floodWarning(self, now, wid, wmsgf = None, doin = True, logf = None):
if doin is True:
doin = self._doing_last
def a(f = wmsgf):
return lambda: "%s (doing %s)" % (f(), doin)
wmsgf = a()
winfo = self.lastWarning.setdefault(wid, [0, None])
(lastTime, lastDoing) = winfo
if now <= lastTime + max(5, self.MinimumTxnUpdateWait):
return
winfo[0] = now
nowDoing = doin
winfo[1] = nowDoing
if logf is None:
logf = self.logger.warning
logf(wmsgf() if wmsgf else doin)
def _makeOne(self, putf, merkleTree, height):
MT = self.currentMerkleTree
height = self.currentBlock[1]
MR = self.makeMerkleRoot(MT, height=height)
# Only add it if the height hasn't changed in the meantime, to avoid a race
if self.currentBlock[1] == height:
putf(MR)
def makeClear(self):
self._doing('clear merkle roots')
self._makeOne(self.clearMerkleRoots.put, self.curClearMerkleTree, height=self.currentBlock[1])
def makeNext(self):
self._doing('longpoll merkle roots')
self._makeOne(self.nextMerkleRoots.put, self.nextMerkleTree, height=self.currentBlock[1] + 1)
def makeRegular(self):
self._doing('regular merkle roots')
self._makeOne(self.merkleRoots.append, self.currentMerkleTree, height=self.currentBlock[1])
def merkleMaker_II(self):
global now
# No bits = no mining :(
if not self.ready:
return self.updateMerkleTree()
# First, ensure we have the minimum clear, next, and regular (in that order)
if self.clearMerkleRoots.qsize() < self.WorkQueueSizeClear[0]:
return self.makeClear()
if self.nextMerkleRoots.qsize() < self.WorkQueueSizeLongpoll[0]:
return self.makeNext()
if len(self.merkleRoots) < self.WorkQueueSizeRegular[0]:
return self.makeRegular()
# If we've met the minimum requirements, consider updating the merkle tree
if self.nextMerkleUpdate <= now:
return self.updateMerkleTree()
# Finally, fill up clear, next, and regular until we've met the maximums
if self.clearMerkleRoots.qsize() < self.WorkQueueSizeClear[1]:
return self.makeClear()
if self.nextMerkleRoots.qsize() < self.WorkQueueSizeLongpoll[1]:
return self.makeNext()
if len(self.merkleRoots) < self.WorkQueueSizeRegular[1] or self.merkleRoots[0][1] != self.currentMerkleTree:
return self.makeRegular()
# Nothing left to do, fire onBlockUpdate event (if appropriate) and sleep
if self.needMerkle == 1:
self.onBlockUpdate()
self.needMerkle = False
self._doing('idle')
# TODO: rather than sleepspin, block until MinimumTxnUpdateWait expires or threading.Condition(?)
sleep(self.IdleSleepTime)
def merkleMaker_I(self):
global now
now = time()
self.merkleMaker_II()
if self.needMerkle == 1 and now > self.needMerkleSince + self.WarningDelayTxnLongpoll:
self._floodWarning(now, 'NeedMerkle', lambda: 'Transaction-longpoll requested %d seconds ago, and still not ready. Is your server fast enough to keep up with your configured WorkQueueSizeRegular maximum?' % (now - self.needMerkleSince,))
if now > self.nextMerkleUpdate + self.WarningDelayMerkleUpdate:
self._floodWarning(now, 'MerkleUpdate', lambda: "Haven't updated the merkle tree in at least %d seconds! Is your server fast enough to keep up with your configured work queue minimums?" % (now - self.lastMerkleUpdate,))
def run(self):
while True:
try:
self.merkleMaker_I()
except:
self.logger.critical(traceback.format_exc())
def start(self, *a, **k):
self._prepare()
super().start(*a, **k)
def getMRD(self):
try:
MRD = self.merkleRoots.pop()
self.LowestMerkleRoots = min(len(self.merkleRoots), self.LowestMerkleRoots)
rollPrevBlk = False
except IndexError:
qsz = self.clearMerkleRoots.qsize()
if qsz < 0x10:
self.logger.warning('clearMerkleRoots running out! only %d left' % (qsz,))
MRD = self.clearMerkleRoots.get()
self.LowestClearMerkleRoots = min(self.clearMerkleRoots.qsize(), self.LowestClearMerkleRoots)
rollPrevBlk = True
(merkleRoot, merkleTree, cb) = MRD
(prevBlock, height, bits) = self.currentBlock
return (merkleRoot, merkleTree, cb, prevBlock, bits, rollPrevBlk)
def getMC(self, wantClear = False):
if not self.ready:
with self.readyCV:
while not self.ready:
self.readyCV.wait()
(prevBlock, height, bits) = self.currentBlock
mt = self.curClearMerkleTree if wantClear else self.currentMerkleTree
cb = self.makeCoinbase(height=height)
rollPrevBlk = (mt == self.curClearMerkleTree)
return (height, mt, cb, prevBlock, bits, rollPrevBlk)
# merkleMaker tests
def _test():
global now
now = 1337039788
MM = merkleMaker()
reallogger = MM.logger
class fakelogger:
LO = False
def critical(self, *a):
if self.LO > 1: return
reallogger.critical(*a)
def warning(self, *a):
if self.LO: return
reallogger.warning(*a)
def debug(self, *a):
pass
MM.logger = fakelogger()
class NMTClass:
pass
# _makeBlockSafe tests
from copy import deepcopy
MP = {
'coinbasevalue':50,
}
txnlist = [b'\0', b'\x01', b'\x02']
txninfo = [{'fee':0, 'sigops':1}, {'fee':5, 'sigops':10000}, {'fee':0, 'sigops':10001}]
def MBS(LO = 0):
m = deepcopy( (MP, txnlist, txninfo) )
MM.logger.LO = LO
try:
MM._makeBlockSafe(*m)
except:
if LO < 2:
raise
else:
assert LO < 2 # An expected error wasn't thrown
if 'POTInfo' in m[0]:
del m[0]['POTInfo']
return m
MM.POT = 0
assert MBS() == (MP, txnlist[:2], txninfo[:2])
txninfo[2]['fee'] = 1
MPx = deepcopy(MP)
MPx['coinbasevalue'] -= 1
assert MBS() == (MPx, txnlist[:2], txninfo[:2])
txninfo[2]['sigops'] = 1
assert MBS(1) == (MP, txnlist, txninfo)
# APOT tests
MM.POT = 2
txnlist.append(b'\x03')
txninfo.append({'fee':1, 'sigops':0})
MPx = deepcopy(MP)
MPx['coinbasevalue'] -= 1
assert MBS() == (MPx, txnlist[:3], txninfo[:3])
_test()
| uingei/mm | merklemaker.py | Python | agpl-3.0 | 19,767 | 0.034704 |
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
__version__ = '1.1.0'
__author__ = 'Arista Networks'
| IntelligentVisibility/ztpserver | ztpserver/__init__.py | Python | bsd-3-clause | 1,638 | 0.000611 |
import urllib.request
import pickle
import sys
try:
import Variables as v
except:
class var():
def __init__(self):
self.screen = None
v = var()
import pygame as py
class textLabel(py.sprite.Sprite):
def __init__(self, text, pos, colour, font, size, variable = False, centred = False):
super().__init__()
self.text = text
self.pos = pos
self.colour = colour
self.font = font
self.size = size
self.variable = variable
self.centred = centred
def update(self):
pos = self.pos
font = py.font.Font(self.font, self.size)
if not self.variable:
label = font.render(self.text, 1, self.colour)
if self.variable:
label = font.render(str(getattr(v, self.text)), 1, self.colour)
if self.centred:
pos = list(self.pos)
pos[0] -= font.size(self.text)[0] / 2
pos[1] -= font.size(self.text)[1] / 2
pos = tuple(pos)
v.screen.blit(label, pos)
class Button(py.sprite.Sprite):
def __init__(self, text, pos, size, hovercolour, normalcolour, font, ID, centred = False, bsize=(0,0)):
super().__init__()
self.ID = ID
self.hovered = False
self.text = text
self.pos = pos
self.hcolour = hovercolour
self.ncolour = normalcolour
self.font = font
self.font = py.font.Font(font, int(size))
self.centred = centred
self.size = bsize
self.set_rect()
def update(self):
self.set_rend()
py.draw.rect(v.screen, self.get_color(), self.rect)
v.screen.blit(self.rend, self.rect)
if self.rect.collidepoint(v.mouse_pos):
self.hovered = True
else:
self.hovered = False
def set_rend(self):
self.rend = self.font.render(self.text, True, (0,0,0))
def get_color(self):
if self.hovered:
return self.hcolour
else:
return self.ncolour
def set_rect(self):
self.set_rend()
self.rect = self.rend.get_rect()
if not self.centred:
self.rect.topleft = self.pos
if self.centred:
self.rect.center = self.pos
if not self.size[0] == 0:
self.rect.width = self.size[0]
if not self.size[1] == 0:
self.rect.height = self.size[1]
def pressed(self):
mouse = v.mouse_pos
if mouse[0] > self.rect.topleft[0]:
if mouse[1] > self.rect.topleft[1]:
if mouse[0] < self.rect.bottomright[0]:
if mouse[1] < self.rect.bottomright[1]:
return True
else: return False
else: return False
else: return False
else: return False
import os, shutil
try:
shutil.copyfile("Resources/Fonts/Vecna.otf", "Update/Vecna.otf")
theFont = "Update/Vecna.otf"
except:
theFont = None
py.init()
v.screen = py.display.set_mode((640, 480))
v.screen.fill((20, 20, 20))
textLabel("Checking For Updates...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
tries = 0
def reporthook(count, blockSize, totalSize):
if totalSize == -1:
print("FAILED TOTALSIZE")
raise Exception()
#Shows percentage of download
py.event.pump()
for event in py.event.get():
if event.type == py.QUIT:
sys.exit()
percent = int(count*blockSize*100/totalSize)
rect = py.Rect(100, 240, percent*4.4, 30)
v.screen.fill((20, 20, 20))
py.draw.rect(v.screen, (255, 0, 0), rect)
py.draw.rect(v.screen, (0, 0, 0), rect, 2)
py.draw.rect(v.screen, (0, 0, 0), (100, 240, 440, 30), 2)
#font = py.font.Font(theFont, 25)
#title = font.render("Downloading...", 1, (255, 255, 255))
#progress = font.render(str(percent) + "%", 1, (255, 255, 255))
#v.screen.blit(title, (200, 200))
#v.screen.blit(progress, (200, 250))
textLabel("Downloading...", (320, 150), (255, 255, 255), theFont, 50, False, True).update()
textLabel(str(percent) + "%", (320, 255), (255, 255, 255), theFont, 20, False, True).update()
py.display.flip()
#sys.stdout.write("\r" + "...%d%%" % percent)
#sys.stdout.flush()
def recursive_overwrite(src, dest, ignore=None):
if os.path.isdir(src):
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(src)
if ignore is not None:
ignored = ignore(src, files)
else:
ignored = set()
for f in files:
if f not in ignored:
recursive_overwrite(os.path.join(src, f),
os.path.join(dest, f),
ignore)
else:
shutil.copyfile(src, dest)
def updateCheck():
global latest
page = urllib.request.urlopen('https://github.com/Lightning3105/Legend-Of-Aiopa-RPG/commits/master')
page = str(page.read())
ind = page.find('class="sha btn btn-outline"')
latest = page[ind + 38:ind + 45]
print(latest)
#CHECK IF LATEST IS PROPER
try:
f = open("Saves/current.version", "rb")
current = pickle.load(f)
f.close()
except:
print("create new file")
try:
os.mkdir("Saves")
except:
pass
f = open("Saves/current.version", "wb")
current = 0000
pickle.dump(current, f)
f.close()
print(current, "vs", latest)
if current != latest:
from os import remove
try:
remove("Update/download.zip")
except:
pass
print("downloading latest")
buttons = py.sprite.Group()
buttons.add(Button("Update", (220, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "Y", centred=True))
buttons.add(Button("Ignore", (420, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "N", centred=True))
buttons.add(Button("Skip Update", (320, 300), 40, (100, 100, 100), (255, 255, 255), theFont, "S", centred=True))
labels = py.sprite.Group()
labels.add(textLabel("An Update Is Available:", (320, 150), (255, 255, 255), theFont, 50, False, True))
labels.add(textLabel(str(str(current) + " ==> " + str(latest)), (320, 180), (255, 255, 255), theFont, 20, False, True))
while True:
py.event.pump()
v.screen.fill((20, 20, 20))
buttons.update()
labels.update()
for event in py.event.get():
if event.type == py.QUIT:
sys.exit()
elif event.type == py.MOUSEBUTTONDOWN:
for button in buttons:
if button.pressed():
id = button.ID
if id == "Y":
global tries
tries = 0
download()
return
if id == "N":
return
if id == "S":
f = open("Saves/current.version", "wb")
current = latest
pickle.dump(current, f)
f.close()
return
py.display.flip()
else:
v.screen.fill((20, 20, 20))
t = textLabel("No Update!", (320, 250), (255, 0, 0), theFont, 70, False, True)
v.current = current
t2 = textLabel("current", (320, 300), (255, 200, 200), theFont, 50, True, True)
t.update()
t2.update()
py.display.update()
if __name__ == "__main__":
py.time.wait(2000)
def download():
global tries
try:
try:
os.mkdir("Update")
except:
pass
urllib.request.urlretrieve("https://github.com/Lightning3105/Legend-Of-Aiopa-RPG/archive/master.zip", "Update/download.zip", reporthook)
f = open("Saves/current.version", "wb")
current = latest
pickle.dump(current, f)
f.close()
unzip()
except Exception as e:
tries += 1
print("Error: " + str(e))
v.screen.fill((20, 20, 20))
textLabel("Download Error. Retry " + str(tries) + "/8", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
textLabel("Error: " + str(e), (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
if tries > 8:
return
download()
def unzip():
v.screen.fill((20, 20, 20))
textLabel("Extracting Data...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
import zipfile
with zipfile.ZipFile('Update/download.zip', "r") as z:
z.extractall("Update/")
v.screen.fill((20, 20, 20))
textLabel("Updating Files...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
from os import getcwd
recursive_overwrite("Update/Legend-Of-Aiopa-RPG-master", getcwd())
if __name__ == "__main__":
updateCheck()
| Lightning3105/Legend-Of-Aiopa-RPG | Updater.py | Python | gpl-2.0 | 9,598 | 0.007293 |
"""Predicted Electoral Vote Count"""
import re
from madcow.util.http import getsoup
from madcow.util.color import ColorLib
from madcow.util import Module, strip_html
class Main(Module):
pattern = re.compile(r'^\s*(election|ev)\s*$', re.I)
    help = u'ev - current election 2012 vote prediction'
baseurl = u'http://www.electoral-vote.com/'
def init(self):
if self.madcow is None:
self.colorlib = ColorLib('ansi')
else:
self.colorlib = self.madcow.colorlib
def colorize(self, color, key, val):
return u'%s: %s' % (key, val)
def render(self, node):
pass
def response(self, nick, args, kwargs):
soup = getsoup(self.baseurl)
out = []
for box in soup.find('div', 'score-box').findAll('div', 'box'):
score = []
for key in 'name', 'score':
val = strip_html(box.find('span', key).renderContents()).replace(u'\xa0', u'').strip()
if key == 'name':
if val == u'Obama':
color = 'blue'
elif val == 'Romney':
color = 'red'
else:
color = None
if color:
val = self.colorlib.get_color(color, text=val)
if val:
score.append(val)
if score:
out.append(u'%s: %s' % tuple(score))
return u'%s: %s' % (nick, u', '.join(out))
#from IPython.Shell import IPShellEmbed as S; #S()()
| icucinema/madcow | madcow/modules/election.py | Python | gpl-3.0 | 1,577 | 0.001902 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssl_server
short_description: Configure SSL servers in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ssl_server category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ssl_server:
description:
- Configure SSL servers.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
add_header_x_forwarded_proto:
description:
- Enable/disable adding an X-Forwarded-Proto header to forwarded requests.
type: str
choices:
- enable
- disable
ip:
description:
- IPv4 address of the SSL server.
type: str
mapped_port:
description:
- Mapped server service port (1 - 65535).
type: int
name:
description:
- Server name.
required: true
type: str
port:
description:
- Server service port (1 - 65535).
type: int
ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
type: str
choices:
- high
- medium
- low
ssl_cert:
description:
- Name of certificate for SSL connections to this server. Source vpn.certificate.local.name.
type: str
ssl_client_renegotiation:
description:
- Allow or block client renegotiation by server.
type: str
choices:
- allow
- deny
- secure
ssl_dh_bits:
description:
- Bit-size of Diffie-Hellman (DH) prime used in DHE-RSA negotiation.
type: str
choices:
- 768
- 1024
- 1536
- 2048
ssl_max_version:
description:
- Highest SSL/TLS version to negotiate.
type: str
choices:
- tls-1.0
- tls-1.1
- tls-1.2
ssl_min_version:
description:
- Lowest SSL/TLS version to negotiate.
type: str
choices:
- tls-1.0
- tls-1.1
- tls-1.2
ssl_mode:
description:
- SSL/TLS mode for encryption and decryption of traffic.
type: str
choices:
- half
- full
ssl_send_empty_frags:
description:
- Enable/disable sending empty fragments to avoid attack on CBC IV.
type: str
choices:
- enable
- disable
url_rewrite:
description:
- Enable/disable rewriting the URL.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure SSL servers.
fortios_firewall_ssl_server:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ssl_server:
add_header_x_forwarded_proto: "enable"
ip: "<your_own_value>"
mapped_port: "5"
name: "default_name_6"
port: "7"
ssl_algorithm: "high"
ssl_cert: "<your_own_value> (source vpn.certificate.local.name)"
ssl_client_renegotiation: "allow"
ssl_dh_bits: "768"
ssl_max_version: "tls-1.0"
ssl_min_version: "tls-1.0"
ssl_mode: "half"
ssl_send_empty_frags: "enable"
url_rewrite: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ssl_server_data(json):
option_list = ['add_header_x_forwarded_proto', 'ip', 'mapped_port',
'name', 'port', 'ssl_algorithm',
'ssl_cert', 'ssl_client_renegotiation', 'ssl_dh_bits',
'ssl_max_version', 'ssl_min_version', 'ssl_mode',
'ssl_send_empty_frags', 'url_rewrite']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ssl_server(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_ssl_server'] and data['firewall_ssl_server']:
state = data['firewall_ssl_server']['state']
else:
state = True
firewall_ssl_server_data = data['firewall_ssl_server']
filtered_data = underscore_to_hyphen(filter_firewall_ssl_server_data(firewall_ssl_server_data))
if state == "present":
return fos.set('firewall',
'ssl-server',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'ssl-server',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_ssl_server']:
resp = firewall_ssl_server(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ssl_server": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"add_header_x_forwarded_proto": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ip": {"required": False, "type": "str"},
"mapped_port": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"port": {"required": False, "type": "int"},
"ssl_algorithm": {"required": False, "type": "str",
"choices": ["high", "medium", "low"]},
"ssl_cert": {"required": False, "type": "str"},
"ssl_client_renegotiation": {"required": False, "type": "str",
"choices": ["allow", "deny", "secure"]},
"ssl_dh_bits": {"required": False, "type": "str",
"choices": ["768", "1024", "1536",
"2048"]},
"ssl_max_version": {"required": False, "type": "str",
"choices": ["tls-1.0", "tls-1.1", "tls-1.2"]},
"ssl_min_version": {"required": False, "type": "str",
"choices": ["tls-1.0", "tls-1.1", "tls-1.2"]},
"ssl_mode": {"required": False, "type": "str",
"choices": ["half", "full"]},
"ssl_send_empty_frags": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"url_rewrite": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| simonwydooghe/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ssl_server.py | Python | gpl-3.0 | 15,241 | 0.001706 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
import random
from nupic.encoders.utils import bitsToString
class LanguageEncoder(object):
"""
An encoder converts a value to a sparse distributed representation (SDR).
The Encoder superclass implements:
- bitmapToSDR() returns binary SDR of a bitmap
- bitmapFromSDR() returns the bitmap rep of an SDR
- pprintHeader() prints a header describing the encoding to the terminal
- pprint() prints an encoding to the terminal
- decodedToStr() returns pretty print string of decoded SDR
Methods/properties that must be implemented by subclasses:
- encode() returns a numpy array encoding the input
- decode() returns a list of strings representing a decoded SDR
- getWidth() returns the output width, in bits
- getDescription() returns a dict describing the encoded output
"""
def __init__(self, n=16384, w=328):
"""The SDR dimensions are standard for Cortical.io fingerprints."""
self.n = n
self.w = w
self.targetSparsity = 5.0
def encode(self, inputText):
"""
    Encodes inputText and returns the encoded value as a numpy array (an SDR)
    of length returned by getWidth().
    @param inputText (str) Data to encode. This should be validated by
                  the encoder subclass.
    @return (numpy) 1-D array of same length returned by getWidth().
"""
raise NotImplementedError
def encodeIntoArray(self, inputText, output):
"""
    Encodes inputText and puts the encoded value into the numpy output array,
    which is a 1-D array of length returned by getWidth().
    Note: The numpy output array is reused, so clear it before updating it.
    @param inputText Data to encode. This should be validated by the encoder.
@param output numpy 1-D array of same length returned by getWidth()
"""
raise NotImplementedError
def decode(self, encoded):
"""
Decodes the SDR encoded. See subclass implementation for details; the
decoding approaches and return objects vary depending on the encoder.
To pretty print the return value from this method, use decodedToStr().
@param encoded (numpy) Encoded 1-d array (an SDR).
"""
raise NotImplementedError
def getWidth(self):
"""
Get an encoding's output width in bits. See subclass implementation for
details.
"""
raise NotImplementedError()
def getDescription(self):
"""
Returns a tuple, each containing (name, offset).
The name is a string description of each sub-field, and offset is the bit
offset of the sub-field for that encoder; should be 0.
"""
raise NotImplementedError()
def bitmapToSDR(self, bitmap):
"""Convert SDR encoding from bitmap to binary numpy array."""
sdr = numpy.zeros(self.n)
sdr[bitmap] = 1
return sdr
def bitmapFromSDR(self, sdr):
"""Convert SDR encoding from binary numpy array to bitmap."""
return numpy.array([i for i in range(len(sdr)) if sdr[i]==1])
  def encodeRandomly(self, text):
    """Return a random bitmap representation of the input text."""
    random.seed(text)
    return numpy.sort(random.sample(xrange(self.n), self.w))
def compare(self, bitmap1, bitmap2):
"""
Compare bitmaps, returning a dict of similarity measures.
@param bitmap1 (list) Indices of ON bits.
@param bitmap2 (list) Indices of ON bits.
@return distances (dict) Key-values of distance metrics and values.
Example return dict:
{
"cosineSimilarity": 0.6666666666666666,
"euclideanDistance": 0.3333333333333333,
"jaccardDistance": 0.5,
"overlappingAll": 6,
"overlappingLeftRight": 0.6666666666666666,
"overlappingRightLeft": 0.6666666666666666,
"sizeLeft": 9,
"sizeRight": 9
}
"""
if not len(bitmap1) > 0 or not len(bitmap2) > 0:
raise ValueError("Bitmaps must have ON bits to compare.")
sdr1 = self.bitmapToSDR(bitmap1)
sdr2 = self.bitmapToSDR(bitmap2)
distances = {
"sizeLeft": float(len(bitmap1)),
"sizeRight": float(len(bitmap2)),
"overlappingAll": float(len(numpy.intersect1d(bitmap1, bitmap2))),
"euclideanDistance": numpy.linalg.norm(sdr1 - sdr2)
}
distances["overlappingLeftRight"] = (distances["overlappingAll"] /
distances["sizeLeft"])
distances["overlappingRightLeft"] = (distances["overlappingAll"] /
distances["sizeRight"])
distances["cosineSimilarity"] = (distances["overlappingAll"] /
(math.sqrt(distances["sizeLeft"]) * math.sqrt(distances["sizeRight"])))
distances["jaccardDistance"] = 1 - (distances["overlappingAll"] /
len(numpy.union1d(bitmap1, bitmap2)))
return distances
def sparseUnion(self, counts):
"""
Bits from the input patterns are unionized and then sparsified.
@param counts (Counter) A count of the ON bits for the union bitmap.
@return (list) A sparsified union bitmap.
"""
max_sparsity = int((self.targetSparsity / 100) * self.n)
w = min(len(counts), max_sparsity)
return [c[0] for c in counts.most_common(w)]
def pprintHeader(self, prefix=""):
"""
Pretty-print a header that labels the sub-fields of the encoded output.
This can be used in conjuction with pprint().
@param prefix printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
name = description[i][0]
width = description[i+1][1] - description[i][1]
formatStr = "%%-%ds |" % width
if len(name) > width:
pname = name[0:width]
else:
pname = name
print formatStr % pname,
print
print prefix, "-" * (self.getWidth() + (len(description) - 1)*3 - 1)
def pprint(self, output, prefix=""):
"""
Pretty-print the encoded output using ascii art.
@param output to print
@param prefix printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
offset = description[i][1]
nextoffset = description[i+1][1]
print "%s |" % bitsToString(output[offset:nextoffset]),
print
def decodedToStr(self, decodeResults):
"""
Return a pretty print string representing the return value from decode().
"""
(fieldsDict, fieldsOrder) = decodeResults
desc = ''
for fieldName in fieldsOrder:
(ranges, rangesStr) = fieldsDict[fieldName]
if len(desc) > 0:
desc += ", %s:" % (fieldName)
else:
desc += "%s:" % (fieldName)
desc += "[%s]" % (rangesStr)
return desc
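# Illustrative usage of the distance metrics above (added annotation, not part
# of the original nupic.fluent API). Bitmaps are lists of ON-bit indices in an
# n-bit SDR:
#
#   encoder = LanguageEncoder(n=16384, w=328)
#   distances = encoder.compare([0, 1, 2, 3], [2, 3, 4, 5])
#   distances["overlappingAll"]        # 2.0 ON bits shared by both bitmaps
#   distances["overlappingLeftRight"]  # 2.0 / 4 = 0.5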
| akhilaananthram/nupic.fluent | fluent/encoders/language_encoder.py | Python | agpl-3.0 | 7,974 | 0.005769 |
# Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
import nova.privsep.libvirt
from nova import utils
from nova.virt.libvirt.volume import fs
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
VALID_SYSD_STATES = ["starting", "running", "degraded"]
SYSTEMCTL_CHECK_PATH = "/run/systemd/system"
_is_systemd = None
def is_systemd():
"""Checks if the host is running systemd"""
global _is_systemd
if _is_systemd is not None:
return _is_systemd
tmp_is_systemd = False
if psutil.Process(1).name() == "systemd" or os.path.exists(
SYSTEMCTL_CHECK_PATH):
# NOTE(kaisers): exit code might be >1 in theory but in practice this
# is hard coded to 1. Due to backwards compatibility and systemd
# CODING_STYLE this is unlikely to change.
sysdout, sysderr = processutils.execute("systemctl",
"is-system-running",
check_exit_code=[0, 1])
for state in VALID_SYSD_STATES:
if state == sysdout.strip():
tmp_is_systemd = True
break
_is_systemd = tmp_is_systemd
return _is_systemd
def mount_volume(volume, mnt_base, configfile=None):
"""Wraps execute calls for mounting a Quobyte volume"""
fileutils.ensure_tree(mnt_base)
# Note(kaisers): with systemd this requires a separate CGROUP to
# prevent Nova service stop/restarts from killing the mount.
if is_systemd():
LOG.debug('Mounting volume %s at mount point %s via systemd-run',
volume, mnt_base)
nova.privsep.libvirt.systemd_run_qb_mount(volume, mnt_base,
cfg_file=configfile)
else:
LOG.debug('Mounting volume %s at mount point %s via mount.quobyte',
                  volume, mnt_base)
nova.privsep.libvirt.unprivileged_qb_mount(volume, mnt_base,
cfg_file=configfile)
LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
"""Wraps execute calls for unmouting a Quobyte volume"""
try:
if is_systemd():
nova.privsep.libvirt.umount(mnt_base)
else:
nova.privsep.libvirt.unprivileged_umount(mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
def validate_volume(mount_path):
"""Determine if the volume is a valid Quobyte mount.
Runs a number of tests to be sure this is a (working) Quobyte mount
"""
partitions = psutil.disk_partitions(all=True)
for p in partitions:
if mount_path != p.mountpoint:
continue
if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte":
statresult = os.stat(mount_path)
# Note(kaisers): Quobyte always shows mount points with size 0
if statresult.st_size == 0:
# client looks healthy
return # we're happy here
else:
msg = (_("The mount %(mount_path)s is not a "
"valid Quobyte volume. Stale mount?")
% {'mount_path': mount_path})
raise nova_exception.StaleVolumeMount(msg, mount_path=mount_path)
else:
msg = (_("The mount %(mount_path)s is not a valid "
"Quobyte volume according to partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
msg = (_("No matching Quobyte mount entry for %(mount_path)s"
" could be found for validation in partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def _get_mount_point_base(self):
return CONF.libvirt.quobyte_mount_point_base
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = SOURCE_PROTOCOL
conf.source_type = SOURCE_TYPE
conf.driver_cache = DRIVER_CACHE
conf.driver_io = DRIVER_IO
conf.driver_format = data.get('format', 'raw')
conf.source_path = self._get_device_path(connection_info)
return conf
@utils.synchronized('connect_qb_volume')
def connect_volume(self, connection_info, instance):
"""Connect the volume."""
if is_systemd():
LOG.debug("systemd detected.")
else:
LOG.debug("No systemd detected.")
data = connection_info['data']
quobyte_volume = self._normalize_export(data['export'])
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
mounted = True
except nova_exception.StaleVolumeMount:
mounted = False
LOG.info('Fixing previous mount %s which was not '
'unmounted correctly.', mount_path)
umount_volume(mount_path)
except nova_exception.InvalidVolume:
mounted = False
if not mounted:
mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as nex:
LOG.error("Could not mount Quobyte volume: %s", nex)
@utils.synchronized('connect_qb_volume')
def disconnect_volume(self, connection_info, instance):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as exc:
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
else:
umount_volume(mount_path)
def _normalize_export(self, export):
protocol = SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
return export
| rahulunair/nova | nova/virt/libvirt/volume/quobyte.py | Python | apache-2.0 | 7,552 | 0 |
"""
WSGI config for astrology project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.prod")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| flp9001/clevenus | clevenus/config/wsgi.py | Python | gpl-3.0 | 395 | 0.002532 |
import os
import sys
import sqlite3
import logging
from tqdm import tqdm
from pathlib import Path
from whoosh.index import create_in, open_dir
from whoosh.fields import Schema, TEXT, NUMERIC
from whoosh.qparser import QueryParser
from whoosh.spelling import ListCorrector
from whoosh.highlight import UppercaseFormatter
logging.basicConfig(level=logging.INFO)
if getattr(sys, 'frozen', False):
APPLICATION_PATH = os.path.dirname(sys.executable)
elif __file__:
APPLICATION_PATH = os.path.dirname(__file__)
PATH = APPLICATION_PATH
PATH_DATA = Path(PATH) / 'data'
FILE_DB = PATH_DATA / "data.db"
class Searcher:
def __init__(self):
self.scope = 20
self.terms = set()
self.index_path = "index"
self.common_terms = set()
self.schema = Schema(
title=TEXT(stored=True),
path=TEXT(stored=True),
page=NUMERIC(stored=True),
content=TEXT(stored=True))
self.ix = None
self.index_files = False
if not os.path.exists(self.index_path):
os.mkdir(self.index_path)
self.ix = create_in(self.index_path, self.schema)
self.index_files = True
else:
self.ix = open_dir(self.index_path)
self.writer = self.ix.writer()
self.read()
self.writer.commit()
self.searcher = self.ix.searcher()
self.corrector = ListCorrector(sorted(list(self.common_terms)))
self.parser = QueryParser("content", self.ix.schema)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.searcher.close()
def search(self, term):
results = []
suggestions = [term]+(self.corrector.suggest(term, limit=5))
for t in suggestions:
query = self.parser.parse(t)
query_res = self.searcher.search(query, limit=100)
query_res.fragmenter.maxchars = 300
query_res.fragmenter.surround = 100
query_res.formatter = UppercaseFormatter()
results.append((t, query_res))
return results
def read(self):
logging.info("Indexing")
con = sqlite3.connect(str(FILE_DB))
cur = con.cursor()
cur.execute(r"SELECT BOOKS.NAME, PAGE, CONTENT "
r"FROM TEXT, BOOKS "
r"WHERE BOOK = BOOKS.ID "
r"ORDER BY BOOKS.NAME, PAGE")
for row in tqdm(cur):
book, page, content = row
book, page, content = str(book), str(page), str(content)
for i in content.split(' '):
self.common_terms.add(i)
if self.index_files:
self.writer.add_document(title=book, content=content, path=book, page=page)
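# Minimal usage sketch (illustrative; assumes data/data.db exists with the
# BOOKS and TEXT tables queried in read() above):
#   with Searcher() as searcher:
#       for suggestion, hits in searcher.search("example"):
#           for hit in hits:
#               print(hit["title"], hit["page"])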
| DeastinY/srpdfcrawler | pdf_search.py | Python | gpl-3.0 | 2,775 | 0.000721 |
import avango
import avango.script
import avango.gua
from examples_common.GuaVE import GuaVE
class TimedRotate(avango.script.Script):
TimeIn = avango.SFFloat()
MatrixOut = avango.gua.SFMatrix4()
def evaluate(self):
self.MatrixOut.value = avango.gua.make_rot_mat(
self.TimeIn.value * 2.0, 0.0, 1.0, 0.0)
def start():
# setup scenegraph
graph = avango.gua.nodes.SceneGraph(Name="scenegraph")
loader = avango.gua.nodes.TriMeshLoader()
monkey1 = loader.create_geometry_from_file(
"monkey", "data/objects/monkey.obj",
avango.gua.LoaderFlags.NORMALIZE_SCALE)
monkey2 = loader.create_geometry_from_file(
"monkey", "data/objects/monkey.obj",
avango.gua.LoaderFlags.NORMALIZE_SCALE)
monkey1.Material.value.set_uniform(
"Color", avango.gua.Vec4(1.0, 0.766, 0.336, 1.0))
monkey1.Material.value.set_uniform("Roughness", 0.3)
monkey1.Material.value.set_uniform("Metalness", 1.0)
monkey2.Material.value.set_uniform(
"Color", avango.gua.Vec4(1.0, 0.266, 0.136, 1.0))
monkey2.Material.value.set_uniform("Roughness", 0.6)
monkey2.Material.value.set_uniform("Metalness", 0.0)
transform1 = avango.gua.nodes.TransformNode(Children=[monkey1])
transform2 = avango.gua.nodes.TransformNode(
Transform=avango.gua.make_trans_mat(-0.5, 0.0, 0.0),
Children=[monkey2])
light = avango.gua.nodes.LightNode(
Type=avango.gua.LightType.POINT,
Name="light",
Color=avango.gua.Color(1.0, 1.0, 1.0),
Brightness=100.0,
Transform=(avango.gua.make_trans_mat(1, 1, 5) *
avango.gua.make_scale_mat(30, 30, 30)))
size = avango.gua.Vec2ui(1024, 768)
window = avango.gua.nodes.GlfwWindow(Size=size, LeftResolution=size)
avango.gua.register_window("window", window)
cam = avango.gua.nodes.CameraNode(
LeftScreenPath="/screen",
SceneGraph="scenegraph",
Resolution=size,
OutputWindowName="window",
Transform=avango.gua.make_trans_mat(0.0, 0.0, 3.5))
res_pass = avango.gua.nodes.ResolvePassDescription()
res_pass.EnableSSAO.value = True
res_pass.SSAOIntensity.value = 4.0
res_pass.SSAOFalloff.value = 10.0
res_pass.SSAORadius.value = 7.0
#res_pass.EnableScreenSpaceShadow.value = True
res_pass.EnvironmentLightingColor.value = avango.gua.Color(0.1, 0.1, 0.1)
res_pass.ToneMappingMode.value = avango.gua.ToneMappingMode.UNCHARTED
res_pass.Exposure.value = 1.0
res_pass.BackgroundColor.value = avango.gua.Color(0.45, 0.5, 0.6)
anti_aliasing = avango.gua.nodes.SSAAPassDescription()
pipeline_description = avango.gua.nodes.PipelineDescription(
Passes=[
avango.gua.nodes.TriMeshPassDescription(),
avango.gua.nodes.LightVisibilityPassDescription(),
res_pass,
anti_aliasing,
])
cam.PipelineDescription.value = pipeline_description
screen = avango.gua.nodes.ScreenNode(
Name="screen",
Width=2,
Height=1.5,
Children=[cam])
graph.Root.value.Children.value = [transform1, transform2, light, screen]
#setup viewer
viewer = avango.gua.nodes.Viewer()
viewer.SceneGraphs.value = [graph]
viewer.Windows.value = [window]
monkey_updater = TimedRotate()
timer = avango.nodes.TimeSensor()
monkey_updater.TimeIn.connect_from(timer.Time)
transform1.Transform.connect_from(monkey_updater.MatrixOut)
guaVE = GuaVE()
guaVE.start(locals(), globals())
viewer.run()
if __name__ == '__main__':
start()
| jakobharlan/avango | examples/simple_example/main.py | Python | lgpl-3.0 | 3,625 | 0.000552 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The MIT license
Copyright (c) 2010 Jonas Nockert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---
XEP-0012: Last Activity handler
"""
from datetime import datetime
import time
from twisted.words.protocols.jabber.xmlstream import toResponse
from wokkel.subprotocols import IQHandlerMixin, XMPPHandler
NS_LAST_ACTIVITY = 'jabber:iq:last'
LAST_ACTIVITY = '/iq[@type="get"]/query[@xmlns="' + NS_LAST_ACTIVITY +'"]'
class LastActivityHandler(XMPPHandler, IQHandlerMixin):
"""
XMPP subprotocol handler for Last Activity extension.
This protocol is described in
U{XEP-0012<http://www.xmpp.org/extensions/xep-0012.html>}.
"""
iqHandlers = {LAST_ACTIVITY: 'onLastActivityGet'}
def __init__(self, get_last=lambda: 0):
self.get_last = get_last
def connectionInitialized(self):
self.xmlstream.addObserver(LAST_ACTIVITY, self.handleRequest)
def onLastActivityGet(self, iq):
"""Handle a request for last activity."""
response = toResponse(iq, 'result')
# TODO: Replace 'hello world!' string with something proper.
query = response.addElement((NS_LAST_ACTIVITY, 'query'),
content="Hello world!")
query['seconds'] = str(self.get_last())
self.send(response)
iq.handled = True
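# Illustrative XEP-0012 exchange handled by onLastActivityGet (values are
# examples only):
#   client -> server:
#     <iq type='get' id='last1'><query xmlns='jabber:iq:last'/></iq>
#   server -> client:
#     <iq type='result' id='last1'>
#       <query xmlns='jabber:iq:last' seconds='903'>Hello world!</query>
#     </iq>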
| lemonad/molnetbot | molnetbot/xep0012.py | Python | mit | 2,344 | 0.000427 |
import numpy as np
import pytest
from nilabels.tools.image_colors_manipulations.relabeller import relabeller, permute_labels, erase_labels, \
assign_all_other_labels_the_same_value, keep_only_one_label, relabel_half_side_one_label
def test_relabeller_basic():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, range(10), range(10)[::-1])
np.testing.assert_array_equal(relabelled_data, np.array(range(10)[::-1]).reshape(2,5))
def test_relabeller_one_element():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, 0, 1, verbose=1)
expected_output = data[:]
expected_output[0, 0] = 1
np.testing.assert_array_equal(relabelled_data, expected_output)
def test_relabeller_one_element_not_in_array():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, 15, 1, verbose=1)
np.testing.assert_array_equal(relabelled_data, data)
def test_relabeller_wrong_input():
data = np.array(range(10)).reshape(2, 5)
with np.testing.assert_raises(IOError):
relabeller(data, [1, 2], [3, 4, 4])
def test_permute_labels_invalid_permutation():
invalid_permutation = [[3, 3, 3], [1, 1, 1]]
with pytest.raises(IOError):
permute_labels(np.zeros([3, 3]), invalid_permutation)
def test_permute_labels_valid_permutation():
data = np.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
valid_permutation = [[1, 2, 3], [1, 3, 2]]
perm_data = permute_labels(data, valid_permutation)
expected_data = np.array([[1, 3, 2],
[1, 3, 2],
[1, 3, 2]])
np.testing.assert_equal(perm_data, expected_data)
def test_erase_label_simple():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = erase_labels(data, 1)
expected_output = data[:]
expected_output[0, 1] = 0
np.testing.assert_array_equal(data_erased_1, expected_output)
def test_assign_all_other_labels_the_same_values_simple():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = erase_labels(data, 1)
data_labels_to_keep = assign_all_other_labels_the_same_value(data, range(2, 10), same_value_label=0)
np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)
def test_assign_all_other_labels_the_same_values_single_value():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = np.zeros_like(data)
data_erased_1[0, 1] = 1
data_labels_to_keep = assign_all_other_labels_the_same_value(data, 1, same_value_label=0)
np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)
def test_keep_only_one_label_label_simple():
data = np.array(range(10)).reshape(2, 5)
new_data = keep_only_one_label(data, 1)
expected_data = np.zeros([2, 5])
expected_data[0, 1] = 1
np.testing.assert_array_equal(new_data, expected_data)
def test_keep_only_one_label_label_not_present():
data = np.array(range(10)).reshape(2, 5)
new_data = keep_only_one_label(data, 120)
np.testing.assert_array_equal(new_data, data)
def test_relabel_half_side_one_label_wrong_input_shape():
data = np.array(range(10)).reshape(2, 5)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='above',
axis='x', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_side():
data = np.array(range(27)).reshape(3, 3, 3)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='spam',
axis='x', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_axis():
data = np.array(range(27)).reshape(3, 3, 3)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='above',
axis='spam', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_simple():
data = np.array(range(3 ** 3)).reshape(3, 3, 3)
# Z above
new_data = relabel_half_side_one_label(data, label_old=1, label_new=100, side_to_modify='above',
axis='z', plane_intercept=1)
expected_data = data[:]
expected_data[0, 0, 1] = 100
np.testing.assert_array_equal(new_data, expected_data)
# Z below
new_data = relabel_half_side_one_label(data, label_old=3, label_new=300, side_to_modify='below',
axis='z', plane_intercept=2)
expected_data = data[:]
expected_data[0, 1, 0] = 300
np.testing.assert_array_equal(new_data, expected_data)
# Y above
new_data = relabel_half_side_one_label(data, label_old=8, label_new=800, side_to_modify='above',
axis='y', plane_intercept=1)
expected_data = data[:]
expected_data[0, 2, 2] = 800
np.testing.assert_array_equal(new_data, expected_data)
# Y below
new_data = relabel_half_side_one_label(data, label_old=6, label_new=600, side_to_modify='below',
axis='y', plane_intercept=2)
expected_data = data[:]
expected_data[0, 2, 0] = 600
np.testing.assert_array_equal(new_data, expected_data)
# X above
new_data = relabel_half_side_one_label(data, label_old=18, label_new=180, side_to_modify='above',
axis='x', plane_intercept=1)
expected_data = data[:]
expected_data[2, 0, 0] = 180
np.testing.assert_array_equal(new_data, expected_data)
# X below
new_data = relabel_half_side_one_label(data, label_old=4, label_new=400, side_to_modify='below',
axis='x', plane_intercept=2)
expected_data = data[:]
expected_data[0, 1, 1] = 400
np.testing.assert_array_equal(new_data, expected_data)
if __name__ == '__main__':
test_relabeller_basic()
test_relabeller_one_element()
test_relabeller_one_element_not_in_array()
test_relabeller_wrong_input()
test_permute_labels_invalid_permutation()
test_permute_labels_valid_permutation()
test_erase_label_simple()
test_assign_all_other_labels_the_same_values_simple()
test_assign_all_other_labels_the_same_values_single_value()
test_keep_only_one_label_label_simple()
test_keep_only_one_label_label_not_present()
test_relabel_half_side_one_label_wrong_input_shape()
test_relabel_half_side_one_label_wrong_input_side()
test_relabel_half_side_one_label_wrong_input_axis()
test_relabel_half_side_one_label_wrong_input_simple()
| SebastianoF/LabelsManager | tests/tools/test_image_colors_manip_relabeller.py | Python | mit | 6,757 | 0.00222 |
#!/usr/bin/env python
#
# Copyright 2014 Matthew Wall
# See the file LICENSE.txt for your rights.
"""Driver for ADS WS1 weather stations.
Thanks to Steve (sesykes71) for the testing that made this driver possible.
Thanks to Jay Nugent (WB8TKL) and KRK6 for weather-2.kr6k-V2.1
http://server1.nuge.com/~weather/
"""
from __future__ import with_statement
import serial
import syslog
import time
import weewx.drivers
DRIVER_NAME = 'WS1'
DRIVER_VERSION = '0.19'
def loader(config_dict, _):
return WS1Driver(**config_dict[DRIVER_NAME])
def confeditor_loader():
return WS1ConfEditor()
INHG_PER_MBAR = 0.0295333727
METER_PER_FOOT = 0.3048
MILE_PER_KM = 0.621371
DEFAULT_PORT = '/dev/ttyS0'
DEBUG_READ = 0
def logmsg(level, msg):
syslog.syslog(level, 'ws1: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
class WS1Driver(weewx.drivers.AbstractDevice):
"""weewx driver that communicates with an ADS-WS1 station
port - serial port
[Required. Default is /dev/ttyS0]
max_tries - how often to retry serial communication before giving up
[Optional. Default is 5]
retry_wait - how long to wait, in seconds, before retrying after a failure
[Optional. Default is 10]
"""
def __init__(self, **stn_dict):
self.port = stn_dict.get('port', DEFAULT_PORT)
self.max_tries = int(stn_dict.get('max_tries', 5))
self.retry_wait = int(stn_dict.get('retry_wait', 10))
self.last_rain = None
loginf('driver version is %s' % DRIVER_VERSION)
loginf('using serial port %s' % self.port)
global DEBUG_READ
DEBUG_READ = int(stn_dict.get('debug_read', DEBUG_READ))
self.station = Station(self.port)
self.station.open()
def closePort(self):
if self.station is not None:
self.station.close()
self.station = None
@property
def hardware_name(self):
return "WS1"
def genLoopPackets(self):
while True:
packet = {'dateTime': int(time.time() + 0.5),
'usUnits': weewx.US}
readings = self.station.get_readings_with_retry(self.max_tries,
self.retry_wait)
data = Station.parse_readings(readings)
packet.update(data)
self._augment_packet(packet)
yield packet
def _augment_packet(self, packet):
# calculate the rain delta from rain total
if self.last_rain is not None:
packet['rain'] = packet['long_term_rain'] - self.last_rain
else:
packet['rain'] = None
self.last_rain = packet['long_term_rain']
# no wind direction when wind speed is zero
if 'windSpeed' in packet and not packet['windSpeed']:
packet['windDir'] = None
class Station(object):
def __init__(self, port):
self.port = port
self.baudrate = 2400
self.timeout = 3
self.serial_port = None
def __enter__(self):
self.open()
return self
def __exit__(self, _, value, traceback):
self.close()
def open(self):
logdbg("open serial port %s" % self.port)
self.serial_port = serial.Serial(self.port, self.baudrate,
timeout=self.timeout)
def close(self):
if self.serial_port is not None:
logdbg("close serial port %s" % self.port)
self.serial_port.close()
self.serial_port = None
# FIXME: use either CR or LF as line terminator. apparently some ws1
# hardware occasionally ends a line with only CR instead of the standard
# CR-LF, resulting in a line that is too long.
def get_readings(self):
buf = self.serial_port.readline()
if DEBUG_READ:
logdbg("bytes: '%s'" % ' '.join(["%0.2X" % ord(c) for c in buf]))
buf = buf.strip()
return buf
def get_readings_with_retry(self, max_tries=5, retry_wait=10):
for ntries in range(0, max_tries):
try:
buf = self.get_readings()
Station.validate_string(buf)
return buf
except (serial.serialutil.SerialException, weewx.WeeWxIOError), e:
loginf("Failed attempt %d of %d to get readings: %s" %
(ntries + 1, max_tries, e))
time.sleep(retry_wait)
else:
msg = "Max retries (%d) exceeded for readings" % max_tries
logerr(msg)
raise weewx.RetriesExceeded(msg)
@staticmethod
def validate_string(buf):
if len(buf) != 50:
raise weewx.WeeWxIOError("Unexpected buffer length %d" % len(buf))
if buf[0:2] != '!!':
raise weewx.WeeWxIOError("Unexpected header bytes '%s'" % buf[0:2])
return buf
@staticmethod
def parse_readings(raw):
"""WS1 station emits data in PeetBros format:
http://www.peetbros.com/shop/custom.aspx?recid=29
Each line has 50 characters - 2 header bytes and 48 data bytes:
!!000000BE02EB000027700000023A023A0025005800000000
SSSSXXDDTTTTLLLLPPPPttttHHHHhhhhddddmmmmRRRRWWWW
SSSS - wind speed (0.1 kph)
XX - wind direction calibration
DD - wind direction (0-255)
TTTT - outdoor temperature (0.1 F)
LLLL - long term rain (0.01 in)
PPPP - pressure (0.1 mbar)
tttt - indoor temperature (0.1 F)
HHHH - outdoor humidity (0.1 %)
hhhh - indoor humidity (0.1 %)
dddd - date (day of year)
mmmm - time (minute of day)
RRRR - daily rain (0.01 in)
WWWW - one minute wind average (0.1 kph)
"""
# FIXME: peetbros could be 40 bytes or 44 bytes, what about ws1?
# FIXME: peetbros uses two's complement for temp, what about ws1?
# FIXME: for ws1 is the pressure reading 'pressure' or 'barometer'?
buf = raw[2:]
data = dict()
data['windSpeed'] = Station._decode(buf[0:4], 0.1 * MILE_PER_KM) # mph
data['windDir'] = Station._decode(buf[6:8], 1.411764) # compass deg
data['outTemp'] = Station._decode(buf[8:12], 0.1) # degree_F
data['long_term_rain'] = Station._decode(buf[12:16], 0.01) # inch
data['pressure'] = Station._decode(buf[16:20], 0.1 * INHG_PER_MBAR) # inHg
data['inTemp'] = Station._decode(buf[20:24], 0.1) # degree_F
data['outHumidity'] = Station._decode(buf[24:28], 0.1) # percent
data['inHumidity'] = Station._decode(buf[28:32], 0.1) # percent
data['day_of_year'] = Station._decode(buf[32:36])
data['minute_of_day'] = Station._decode(buf[36:40])
data['daily_rain'] = Station._decode(buf[40:44], 0.01) # inch
data['wind_average'] = Station._decode(buf[44:48], 0.1 * MILE_PER_KM) # mph
return data
@staticmethod
def _decode(s, multiplier=None, neg=False):
v = None
try:
v = int(s, 16)
if neg:
bits = 4 * len(s)
if v & (1 << (bits - 1)) != 0:
v -= (1 << bits)
if multiplier is not None:
v *= multiplier
except ValueError, e:
if s != '----':
logdbg("decode failed for '%s': %s" % (s, e))
return v
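    # Worked example (illustrative) using the sample line from parse_readings'
    # docstring, raw = '!!000000BE02EB000027700000023A023A0025005800000000':
    #   buf = raw[2:]
    #   Station._decode(buf[6:8], 1.411764)               -> 0xBE   = 190   -> ~268.2 compass deg
    #   Station._decode(buf[8:12], 0.1)                    -> 0x02EB = 747   -> 74.7 degree_F
    #   Station._decode(buf[16:20], 0.1 * INHG_PER_MBAR)   -> 0x2770 = 10096 -> ~29.82 inHg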
class WS1ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WS1]
# This section is for the ADS WS1 series of weather stations.
# Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
port = /dev/ttyUSB0
# The driver to use:
driver = weewx.drivers.ws1
"""
def prompt_for_settings(self):
print "Specify the serial port on which the station is connected, for"
print "example /dev/ttyUSB0 or /dev/ttyS0."
port = self._prompt('port', '/dev/ttyUSB0')
return {'port': port}
# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/ws1.py
if __name__ == '__main__':
import optparse
usage = """%prog [options] [--help]"""
syslog.openlog('ws1', syslog.LOG_PID | syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='display driver version')
parser.add_option('--port', dest='port', metavar='PORT',
help='serial port to which the station is connected',
default=DEFAULT_PORT)
(options, args) = parser.parse_args()
if options.version:
print "ADS WS1 driver version %s" % DRIVER_VERSION
exit(0)
with Station(options.port) as s:
while True:
print time.time(), s.get_readings()
| RobCranfill/weewx | bin/weewx/drivers/ws1.py | Python | gpl-3.0 | 9,124 | 0.000986 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
import uuid, pprint
def generate(data):
gdata = []
for grade in range(1,4):
for clazz in range(1,10):
            # skip only the single class that already exists in the database
            if not (grade == data['grade_number'] and clazz == data['class_number']):
                gdata.append("insert into classes(uuid, grade_number, class_number, school_uuid) values('%s', %d, %d, '%s');" % (unicode(uuid.uuid4()), grade, clazz, data['school_uuid']))

return gdata
def main():
config = {'user': 'root', 'passwd': 'oseasy_db', 'db': 'banbantong', 'use_unicode': True, 'charset': 'utf8'}
conn = mdb.connect(**config)
if not conn: return
cursor = conn.cursor()
cursor.execute('select grade_number, class_number, school_uuid from classes;')
base = {}
desc = cursor.description
data = cursor.fetchone()
for i, x in enumerate(data):
base[desc[i][0]] = data[i]
moreData = generate(base)
#cursor.executemany('insert into classes(uuid, grade_number, class_number, school_uuid) values(%s, %d, %d, %s)', moreData)
for sql in moreData:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
if __name__ == "__main__":
    main()
| aaronzhang1990/workshare | test/python/addClasses.py | Python | gpl-2.0 | 1,111 | 0.032403 |
#!/usr/bin/env python
# coding=utf-8
import errno
import os
import sys
import fileinput
import string
import logging
import traceback
import hashlib
import time
import re
from datetime import date, timedelta
import datetime
from subprocess import call
import redis
from datasource import DataSource
class Items(DataSource):
def __init__(self, redisClientManager, config, act):
DataSource.__init__(self, config, act)
self.redisClientManager = redisClientManager
self.downloadedDir = ""
self.key = ""
if os.path.exists(self.dir + "/downloaded.txt"):
with open(self.dir + "/downloaded.txt", 'r') as content_file:
self.downloadedDir = content_file.read()
def saveDownloadedDir(self, dir):
self.downloadedDir = dir
with open(self.dir + "/downloaded.txt", "w") as text_file:
text_file.write(dir)
def isOkFloatString(self, value):
for c in value:
if c == '.':
continue
if ord(c) <48 or ord(c) > 57:
return False
return True
def download(self):
try:
cmd = "rm -rf " + self.dir + "/*"
call(cmd, shell=True)
cmd = "hadoop fs -get " + self.download_url + " " + self.dir
logging.info("[" + self.name + "]" + "Downloading file:" + self.download_url)
retcode = call(cmd, shell=True)
if retcode != 0:
logging.error("Child was terminated by signal:" + str(retcode) + " for cmd:" + cmd)
return False
else:
self.saveDownloadedDir(self.datedir)
return True
except:
tb = traceback.format_exc()
logging.error("Some error occured:\n" + tb)
return False
def __parseImport(self, filename, name):
file = open(filename, 'r')
count = 0
ff = name.split('_')
prefix= self.config["prefix"] + ":"
while 1:
lines = file.readlines(10000)
if not lines:
break
for line in lines:
line = line.strip()
if count % 100000 == 0 and count != 0:
logging.info("[" + self.name + "]" + str(count) + " lines parsed and imported to redis for file:" + filename)
count = count + 1
#ss = re.split(r'\t+', line.rstrip('\t'))
line = line.rstrip("\n")
ss = line.split("\t")
if len(ss) != 11:
print "fxxk you man!"
exit(1)
#poi_id = ss[0]
                # discard lines whose poi_id contains non-ASCII characters
poi_id = ss[0]
if not all(ord(c) < 128 for c in poi_id):
logging.error("[" + self.name + "]Discard invalid line:" + line + "\n")
continue
if len(poi_id) > 50:
logging.error("filename:" + filename + ", line:" + str(count) +", cuid too long!")
continue
key = prefix + poi_id
value = ""
i = 1
tag = 0
while i < len(ss):
# if not self.isOkFloatString(ss[i]):
# tag = 1
# break
if i == 1:
value = ss[i]
else:
value = value + ":" + ss[i]
i = i+1
# if tag == 1:
# logging.error("filename:" + filename + ", line:" + str(count) +", not all nums are right")
# continue
clients = self.redisClientManager.getClientsByShardKey("items", poi_id)
self.cnt+=1
if self.key != key:
for client in clients:
client.pipeline.delete(key)
self.key = key
for client in clients:
client.pipeline.sadd(key, value)
client.IncrPipeCount()
if client.pipecount >= 100:
client.commit()
file.close()
return True
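    # Resulting Redis layout (illustrative; self.config["prefix"] is an
    # assumption, e.g. "items"):
    #   key   = "items:<poi_id>"
    #   value = "<col1>:<col2>:...:<col10>"   (the 10 remaining tab-separated columns)
    # Values are accumulated per key with SADD, so each poi_id maps to a set.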
def parseImport(self):
fs = os.listdir(self.dir)
for file in fs:
if file == "status.txt" or file == "downloaded.txt":
continue
while True:
try:
logging.info("[" + self.name + "]Start parsing import data from file:" + file)
self.__parseImport(self.dir + "/" + file, file)
self.redisClientManager.commitClients("items")
break
except:
tb = traceback.format_exc()
logging.error("Some error occured to parsing import file:" + file + "\n" + tb)
time.sleep(60)
return True
def __delete(self, filename):
fi = open(filename, 'r')
count = 0
prefix= self.config["prefix"] + ":"
while 1:
lines = fi.readlines(10000)
if not lines:
break
for line in lines:
line = line.strip()
if count % 100000 == 0 and count != 0:
logging.info("[" + self.name + "]" + str(count) + " lines parsed and deleted from redis for file:" + filename)
count = count + 1
ss = re.split(r'\t+', line.rstrip('\t'))
#poi_id = ss[0]
poi_id = ss[0]
if not all(ord(c) < 128 for c in poi_id):
logging.error("[" + self.name + "]Discard invalid line:" + line + "\n")
continue
if len(poi_id) > 50:
logging.error("filename:" + filename + ", line:" + str(count) +", cuid too long!")
continue
key = prefix + poi_id
clients = self.redisClientManager.getClientsByShardKey("items", poi_id)
for client in clients:
client.pipeline.delete(key)
client.IncrPipeCount()
if client.pipecount >= 100:
client.commit()
fi.close()
return True
def delete(self):
fs = os.listdir(self.dir)
for fi in fs:
if fi == "status.txt" or fi == "downloaded.txt":
continue
while True:
try:
logging.info("[" + self.name + "]Start parsing delete data from file:" + fi)
self.__delete(self.dir + "/" + fi)
self.redisClientManager.commitClients("items")
break
except:
tb = traceback.format_exc()
logging.error("Some error occured to parsing delete file:" + fi + "\n" + tb)
time.sleep(60)
return True
def checkAvailable(self):
try:
if self.action == "import":
yesterday = date.today() - timedelta(1)
self.datedir = yesterday.strftime('%Y%m%d')
#self.datedir = "."
if self.datedir == self.downloadedDir:
return 0
elif self.action == "delete":
self.datedir = self.del_date
if self.datedir == self.downloadedDir:
return 2
self.download_url = self.config["url"].replace("${date}", self.datedir)
donefile = self.config["checkfile"].replace("${date}", self.datedir)
cmd = "hadoop fs -test -e " + donefile
retcode = call(cmd, shell=True)
if retcode == 0:
return 1
return 0
except:
tb = traceback.format_exc()
logging.error("Some error occured:\n" + tb)
return 0
return 0
| junneyang/simplepbrpc | homedir/items.py | Python | mit | 8,104 | 0.006674 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRngtools(RPackage):
"""This package contains a set of functions for working with Random Number
Generators (RNGs). In particular, it defines a generic S4 framework for
getting/setting the current RNG, or RNG data that are embedded into objects
for reproducibility. Notably, convenient default methods greatly facilitate
the way current RNG settings can be changed."""
homepage = "https://renozao.github.io/rngtools"
url = "https://cran.r-project.org/src/contrib/rngtools_1.2.4.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/rngtools"
version('1.2.4', '715967f8b3af2848a76593a7c718c1cd')
depends_on('r-pkgmaker', type=('build', 'run'))
depends_on('r-stringr', type=('build', 'run'))
depends_on('r-digest', type=('build', 'run'))
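    # Illustrative installation command (assumes a working Spack setup):
    #   spack install r-rngtools@1.2.4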
| EmreAtes/spack | var/spack/repos/builtin/packages/r-rngtools/package.py | Python | lgpl-2.1 | 2,067 | 0.000484 |
"""Test the roon config flow."""
from homeassistant import config_entries, setup
from homeassistant.components.roon.const import DOMAIN
from homeassistant.const import CONF_HOST
from tests.async_mock import patch
from tests.common import MockConfigEntry
class RoonApiMock:
"""Mock to handle returning tokens for testing the RoonApi."""
def __init__(self, token):
"""Initialize."""
self._token = token
@property
def token(self):
"""Return the auth token from the api."""
return self._token
def stop(self): # pylint: disable=no-self-use
"""Close down the api."""
return
async def test_form_and_auth(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock("good_token"),
), patch(
"homeassistant.components.roon.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.roon.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Roon Labs Music Player"
assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_no_token(hass):
"""Test we handle no token being returned (timeout or not authorized)."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock(None),
):
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_unknown_exception(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.roon.config_flow.RoonApi",
side_effect=Exception,
):
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_host_already_exists(hass):
"""Test we add the host if the config exists and it isn't a duplicate."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock("good_token"),
), patch(
"homeassistant.components.roon.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.roon.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Roon Labs Music Player"
assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 2
async def test_form_duplicate_host(hass):
"""Test we don't add the host if it's a duplicate."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "existing_host"}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "duplicate_entry"}
| GenericStudent/home-assistant | tests/components/roon/test_config_flow.py | Python | apache-2.0 | 5,782 | 0.000346 |
"""engine.SCons.Tool.aixf77
Tool-specific initialization for IBM Visual Age f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixf77.py 2014/08/24 12:12:31 garyo"
import os.path
#import SCons.Platform.aix
import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
# the correct package names in the following list and uncommenting the
# SCons.Platform.aix_get_xlc() call the in the function below.
packages = []
def get_xlf77(env):
xlf77 = env.get('F77', 'xlf77')
xlf77_r = env.get('SHF77', 'xlf77_r')
#return SCons.Platform.aix.get_xlc(env, xlf77, xlf77_r, packages)
return (None, xlf77, xlf77_r, None)
def generate(env):
"""
Add Builders and construction variables for the Visual Age FORTRAN
compiler to an Environment.
"""
path, _f77, _shf77, version = get_xlf77(env)
if path:
_f77 = os.path.join(path, _f77)
_shf77 = os.path.join(path, _shf77)
f77.generate(env)
env['F77'] = _f77
env['SHF77'] = _shf77
def exists(env):
path, _f77, _shf77, version = get_xlf77(env)
if path and _f77:
xlf77 = os.path.join(path, _f77)
if os.path.exists(xlf77):
return xlf77
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| engineer0x47/SCONS | engine/SCons/Tool/aixf77.py | Python | mit | 2,681 | 0.001865 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import autocomplete_light
from django.utils.encoding import force_text
from .settings import USER_MODEL
from .utils.module_loading import get_real_model_class
class UserAutocomplete(autocomplete_light.AutocompleteModelBase):
search_fields = [
'^first_name',
'last_name',
'username'
]
model = get_real_model_class(USER_MODEL)
order_by = ['first_name', 'last_name']
# choice_template = 'django_documentos/user_choice_autocomplete.html'
limit_choices = 10
attrs = {
'data-autcomplete-minimum-characters': 0,
'placeholder': 'Pessoa que irá assinar',
}
# widget_attrs = {'data-widget-maximum-values': 3}
def choice_value(self, choice):
"""
Return the pk of the choice by default.
"""
return choice.pk
def choice_label(self, choice):
"""
Return the textual representation of the choice by default.
"""
# return force_text("{}-{}".format(choice.pk, choice.get_full_name().title()))
return force_text(choice.get_full_name().title())
# def choice_label(self, choice):
# return choice.get_full_name().title()
def choices_for_request(self):
return super(UserAutocomplete, self).choices_for_request()
autocomplete_light.register(UserAutocomplete)
| luzfcb/django_documentos | django_documentos/autocomplete_light_registry.py | Python | bsd-3-clause | 1,391 | 0.000719 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Muduo
'''
FastSync
'''
from setuptools import setup, find_packages
setup(
name='FastSync',
version='0.2.0.3',
packages=find_packages(),
install_requires=[
'requests',
'watchdog',
'pycrypto',
'future',
'web.py'
],
entry_points={
'console_scripts': [
'fsnd = sync:sending',
'frcv = sync:receiving',
],
},
license='Apache License',
author='Muduo',
author_email='imuduo@163.com',
url='https://github.com/iMuduo/FastSync',
description='Event driven fast synchronization tool',
keywords=['sync'],
)
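# After installation the entry points above expose two console commands
# (illustrative usage; behaviour is defined by sync.sending / sync.receiving):
#   fsnd    # run the sending side
#   frcv    # run the receiving side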
| iMuduo/FastSync | setup.py | Python | apache-2.0 | 690 | 0 |
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import re
import sys
import copy
import warnings
import pytest
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets import make_spd_matrix
from io import StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture import GaussianMixture
from sklearn.mixture._gaussian_mixture import (
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_tied,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_spherical,
_compute_precision_cholesky,
_compute_log_det_cholesky,
)
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData:
def __init__(self, rng, n_samples=200, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([np.full(int(np.round(w * n_samples)), k,
dtype=int)
for k, w in enumerate(self.weights)])
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
msg = (
f"Invalid value for 'n_components': {n_components_bad} "
"Estimation requires at least one component"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
msg = (
f"Invalid value for 'covariance_type': {covariance_type_bad} "
"'covariance_type' should be in ['spherical', 'tied', 'diag', 'full']"
)
with pytest.raises(ValueError):
gmm.fit(X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
msg = (
f"Invalid value for 'tol': {tol_bad:.5f} "
"Tolerance used by the EM must be non-negative"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
msg = (
f"Invalid value for 'reg_covar': {reg_covar_bad:.5f} "
"regularization on covariance must be non-negative"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
msg = (
f"Invalid value for 'max_iter': {max_iter_bad} "
"Estimation requires at least one iteration"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
msg = (
f"Invalid value for 'n_init': {n_init_bad} "
"Estimation requires at least one run"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
msg = (
f"Unimplemented initialization method '{init_params_bad}'"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert gmm.n_components == n_components
assert gmm.covariance_type == covariance_type
assert gmm.tol == tol
assert gmm.reg_covar == reg_covar
assert gmm.max_iter == max_iter
assert gmm.n_init == n_init
assert gmm.init_params == init_params
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
msg = re.escape(
"The parameter 'weights' should have the shape of "
f"({n_components},), but got {str(weights_bad_shape.shape)}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
msg = re.escape(
"The parameter 'weights' should be in the range [0, 1], but got"
f" max value {np.min(weights_bad_range):.5f}, "
f"min value {np.max(weights_bad_range):.5f}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
msg = re.escape(
"The parameter 'weights' should be normalized, "
f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
msg = "The parameter 'means' should have the shape of "
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
'full': np.ones((n_components + 1, n_features, n_features)),
'tied': np.ones((n_features + 1, n_features + 1)),
'diag': np.ones((n_components + 1, n_features)),
'spherical': np.ones((n_components + 1))}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.
precisions_not_positive = {
'full': precisions_not_pos,
'tied': precisions_not_pos[0],
'diag': np.full((n_components, n_features), -1.),
'spherical': np.full(n_components, -1.)}
not_positive_errors = {
'full': 'symmetric, positive-definite',
'tied': 'symmetric, positive-definite',
'diag': 'positive',
'spherical': 'positive'}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(n_components=n_components,
covariance_type=covar_type,
random_state=rng)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
msg = (
f"The parameter '{covar_type} precision' should have "
"the shape of"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
msg = (
f"'{covar_type} precision' should be "
f"{not_positive_errors[covar_type]}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
# compare the precision matrix compute from the
# EmpiricalCovariance.covariance fitted on X*sqrt(resp)
# with _sufficient_sk_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_tied():
# use equation Nk * Sk / N = S_tied
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
0) / n_samples
covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
precs_est = linalg.inv(covars_pred_tied)
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
cov_diag = np.diag(cov_diag)
assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')
assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)
def test_gaussian_suffstat_sk_spherical():
# computing spherical covariance equals to the variance of one-dimension
# data after flattening, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features)
X = X - X.mean()
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean()
covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X,
nk, xk, 0)
covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) /
(n_features * n_samples))
assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical,
'spherical')
assert_almost_equal(covars_pred_spherical, 1. / precs_chol_pred ** 2)
def test_compute_log_det_cholesky():
n_features = 2
rand_data = RandomData(np.random.RandomState(0))
for covar_type in COVARIANCE_TYPE:
covariance = rand_data.covariances[covar_type]
if covar_type == 'full':
            predicted_det = np.array([linalg.det(cov) for cov in covariance])
        elif covar_type == 'tied':
            predicted_det = linalg.det(covariance)
        elif covar_type == 'diag':
            predicted_det = np.array([np.prod(cov) for cov in covariance])
        elif covar_type == 'spherical':
            predicted_det = covariance ** n_features
        # Compute the Cholesky factor of the precision matrix; its
        # log-determinant should equal -0.5 * log(det(covariance))
        expected_det = _compute_log_det_cholesky(_compute_precision_cholesky(
            covariance, covar_type), covar_type, n_features=n_features)
        assert_array_almost_equal(expected_det, - .5 * np.log(predicted_det))
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
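# For a diagonal covariance the per-component log-density used above is
#   log N(x | mu, diag(sigma^2)) = sum_j [ -0.5*log(2*pi*sigma_j^2)
#                                          - (x_j - mu_j)^2 / (2*sigma_j^2) ]
# which is what stats.norm.logpdf(X, mean, std).sum(axis=1) computes row-wise.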
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob
# test against with _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full')
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1. / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag')
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1. / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied')
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[[k] * n_features for k in
covars_spherical])
log_prob = _estimate_log_gaussian_prob(X, means,
precs_spherical, 'spherical')
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(n_components=n_components, random_state=rng,
weights_init=weights, means_init=means,
precisions_init=precisions,
covariance_type=covar_type)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type)
# Check a warning message arrive if we don't do fit
msg = (
"This GaussianMixture instance is not fitted yet. Call 'fit' "
"with appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg):
g.predict(X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert adjusted_rand_score(Y, Y_pred) > .95
@pytest.mark.filterwarnings("ignore:.*did not converge.*")
@pytest.mark.parametrize('seed, max_iter, tol', [
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
])
def test_gaussian_mixture_fit_predict(seed, max_iter, tol):
rng = np.random.RandomState(seed)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type,
max_iter=max_iter, tol=tol)
# check if fit_predict(X) is equivalent to fit(X).predict(X)
f = copy.deepcopy(g)
Y_pred1 = f.fit(X).predict(X)
Y_pred2 = g.fit_predict(X)
assert_array_equal(Y_pred1, Y_pred2)
assert adjusted_rand_score(Y, Y_pred2) > .95
def test_gaussian_mixture_fit_predict_n_init():
# Check that fit_predict is equivalent to fit.predict, when n_init > 1
X = np.random.RandomState(0).randn(1000, 5)
gm = GaussianMixture(n_components=5, n_init=5, random_state=0)
y_pred1 = gm.fit_predict(X)
y_pred2 = gm.predict(X)
assert_array_equal(y_pred1, y_pred2)
def test_gaussian_mixture_fit():
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=20,
reg_covar=0, random_state=rng,
covariance_type=covar_type)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
rtol=0.1, atol=1e-2)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
rtol=0.1, atol=1e-2)
if covar_type == 'full':
prec_pred = g.precisions_
prec_test = rand_data.precisions['full']
elif covar_type == 'tied':
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions['tied']] * n_components)
elif covar_type == 'spherical':
prec_pred = np.array([np.eye(n_features) * c
for c in g.precisions_])
prec_test = np.array([np.eye(n_features) * c for c in
rand_data.precisions['spherical']])
elif covar_type == 'diag':
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in
rand_data.precisions['diag']])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
            # the accuracy depends on the amount of data and the randomness of rng
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15)
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(n_components=n_components,
n_init=n_init, reg_covar=0, random_state=rng,
covariance_type=covar_type)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=max_iter, reg_covar=0, random_state=rng,
covariance_type=covar_type)
msg = (
f"Initialization {max_iter} did not converge. Try different init "
"parameters, or increase max_iter, tol or check for degenerate"
" data."
)
with pytest.warns(ConvergenceWarning, match=msg):
g.fit(X)
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=0).fit(X).score(X)
train2 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=0, n_init=5).fit(X).score(X)
assert train2 >= train1
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng).fit(X)
assert g._n_parameters() == n_params[cv_type]
def test_bic_1d_1component():
    # Test that all covariance_types return the same BIC score for
    # 1-dimensional, 1-component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = GaussianMixture(n_components=n_components,
covariance_type='full',
random_state=rng).fit(X).bic(X)
for covariance_type in ['tied', 'diag', 'spherical']:
bic = GaussianMixture(n_components=n_components,
covariance_type=covariance_type,
random_state=rng).fit(X).bic(X)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi)))
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng, max_iter=200)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = (2 * n_samples * sgh +
np.log(n_samples) * g._n_parameters())
bound = n_features / np.sqrt(n_samples)
assert (g.aic(X) - aic) / n_samples < bound
assert (g.bic(X) - bic) / n_samples < bound
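# Added commentary (not part of the original test): GaussianMixture.aic(X) is
# -2 * score(X) * n_samples + 2 * _n_parameters(), and bic(X) is
# -2 * score(X) * n_samples + log(n_samples) * _n_parameters(). For data drawn
# from a single Gaussian, -2 * log-likelihood is approximately 2 * n_samples * sgh
# (twice the empirical Gaussian entropy computed above), which is why the checks
# compare against `aic`/`bic` built from sgh with an n_features / sqrt(n_samples)
# tolerance per sample.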
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=1)
h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.filterwarnings('ignore:.*did not converge.*')
@pytest.mark.parametrize("seed", (0, 1, 2))
def test_warm_start(seed):
random_state = seed
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
    # Assert that warm_start gives the same results for the same number of iterations
g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,
reg_covar=0, random_state=random_state,
warm_start=False)
h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,
reg_covar=0, random_state=random_state,
warm_start=True)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert score2 > score1
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=False, tol=1e-6)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=True, tol=1e-6)
g.fit(X)
assert not g.converged_
h.fit(X)
# depending on the data there is large variability in the number of
    # refits necessary to converge due to the complete randomness of the
# data
for _ in range(1000):
h.fit(X)
if h.converged_:
break
assert h.converged_
@ignore_warnings(category=ConvergenceWarning)
def test_convergence_detected_with_warm_start():
# We check that convergence is detected when warm_start=True
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
for max_iter in (1, 2, 50):
gmm = GaussianMixture(n_components=n_components, warm_start=True,
max_iter=max_iter, random_state=rng)
for _ in range(100):
gmm.fit(X)
if gmm.converged_:
break
assert gmm.converged_
assert max_iter >= gmm.n_iter_
def test_score():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=rng,
covariance_type=covar_type)
msg = (
"This GaussianMixture instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg):
gmm1.score(X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
    # Check that the score increases
gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng,
covariance_type=covar_type).fit(X)
assert gmm2.score(X) > gmm1.score(X)
def test_score_samples():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
msg = (
"This GaussianMixture instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg):
gmm.score_samples(X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert gmm_score_samples.shape[0] == rand_data.n_samples
def test_monotonic_likelihood():
    # We check that each step of EM without regularization monotonically
    # improves the training set likelihood
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, reg_covar=0,
warm_start=True, max_iter=1, random_state=rng,
tol=1e-7)
current_log_likelihood = -np.infty
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_log_likelihood = current_log_likelihood
current_log_likelihood = gmm.fit(X).score(X)
assert current_log_likelihood >= prev_log_likelihood
if gmm.converged_:
break
assert gmm.converged_
def test_regularisation():
# We train the GaussianMixture on degenerate data by defining two clusters
    # with zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack((np.ones((n_samples // 2, n_features)),
np.zeros((n_samples // 2, n_features))))
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
covariance_type=covar_type, random_state=rng)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
msg = re.escape(
"Fitting the mixture model failed because some components have"
" ill-defined empirical covariance (for instance caused by "
"singleton or collapsed samples). Try to decrease the number "
"of components, or increase reg_covar."
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
gmm.set_params(reg_covar=1e-6).fit(X)
def test_property():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng,
n_init=5)
gmm.fit(X)
if covar_type == 'full':
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == 'tied':
assert_array_almost_equal(linalg.inv(gmm.precisions_),
gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_)
def test_sample():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, n_components=3)
n_features, n_components = rand_data.n_features, rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng)
        # Sampling requires that the GaussianMixture is fitted
msg = "This GaussianMixture instance is not fitted"
with pytest.raises(NotFittedError, match=msg):
gmm.sample(0)
gmm.fit(X)
msg = "Invalid value for 'n_samples'"
with pytest.raises(ValueError, match=msg):
gmm.sample(0)
# Just to make sure the class samples correctly
n_samples = 20000
X_s, y_s = gmm.sample(n_samples)
for k in range(n_components):
if covar_type == 'full':
assert_array_almost_equal(gmm.covariances_[k],
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'tied':
assert_array_almost_equal(gmm.covariances_,
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'diag':
assert_array_almost_equal(gmm.covariances_[k],
np.diag(np.cov(X_s[y_s == k].T)),
decimal=1)
else:
assert_array_almost_equal(
gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]),
decimal=1)
means_s = np.array([np.mean(X_s[y_s == k], 0)
for k in range(n_components)])
assert_array_almost_equal(gmm.means_, means_s, decimal=1)
# Check shapes of sampled data, see
# https://github.com/scikit-learn/scikit-learn/issues/7701
assert X_s.shape == (n_samples, n_features)
for sample_size in range(1, 100):
X_s, _ = gmm.sample(sample_size)
assert X_s.shape == (sample_size, n_features)
@ignore_warnings(category=ConvergenceWarning)
def test_init():
    # We check that increasing n_init yields a better (or equally good) solution
for random_state in range(15):
rand_data = RandomData(np.random.RandomState(random_state),
n_samples=50, scale=1)
n_components = rand_data.n_components
X = rand_data.X['full']
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, random_state=random_state).fit(X)
gmm2 = GaussianMixture(n_components=n_components, n_init=10,
max_iter=1, random_state=random_state).fit(X)
assert gmm2.lower_bound_ >= gmm1.lower_bound_
| glemaitre/scikit-learn | sklearn/mixture/tests/test_gaussian_mixture.py | Python | bsd-3-clause | 40,311 | 0 |
#
# A sample service to be 'compiled' into an exe-file with py2exe.
#
# See also
# setup.py - the distutils' setup script
# setup.cfg - the distutils' config file for this
# README.txt - detailed usage notes
#
# A minimal service, doing nothing else than
# - write 'start' and 'stop' entries into the NT event log
# - when started, waits to be stopped again.
#
import win32serviceutil
import win32service
import win32event
import win32evtlogutil
class MyService(win32serviceutil.ServiceFramework):
_svc_name_ = "MyService"
_svc_display_name_ = "My Service"
_svc_deps_ = ["EventLog"]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
import servicemanager
# Write a 'started' event to the event log...
win32evtlogutil.ReportEvent(self._svc_name_,
servicemanager.PYS_SERVICE_STARTED,
0, # category
servicemanager.EVENTLOG_INFORMATION_TYPE,
(self._svc_name_, ''))
        # wait for being stopped...
win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
# and write a 'stopped' event to the event log.
win32evtlogutil.ReportEvent(self._svc_name_,
servicemanager.PYS_SERVICE_STOPPED,
0, # category
servicemanager.EVENTLOG_INFORMATION_TYPE,
(self._svc_name_, ''))
if __name__ == '__main__':
# Note that this code will not be run in the 'frozen' exe-file!!!
win32serviceutil.HandleCommandLine(MyService)
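    # Added note (not part of the original sample): HandleCommandLine wires up the
    # standard pywin32 service commands, so from an elevated prompt one would
    # typically run, for example:
    #   python MyService.py install
    #   python MyService.py start
    #   python MyService.py stop
    #   python MyService.py remove
    # (a py2exe build exposes the same commands through the generated executable)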
| pupboss/xndian | deploy/site-packages/py2exe/samples/advanced/MyService.py | Python | mit | 2,011 | 0.001989 |
#!/usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
import pycondor
import comptools as comp
if __name__ == "__main__":
    p = argparse.ArgumentParser(
        description='Builds and submits a condor dagman to run anisotropy ks-test trials')
p.add_argument('-c', '--config', dest='config',
default='IC86.2012',
choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'],
help='Detector configuration')
p.add_argument('--low_energy', dest='low_energy',
default=False, action='store_true',
help='Only use events with energy < 10**6.75 GeV')
p.add_argument('--n_side', dest='n_side', type=int,
default=64,
                   help='Healpix nside parameter for the generated maps')
p.add_argument('--chunksize', dest='chunksize', type=int,
default=1000,
help='Number of lines used when reading in DataFrame')
p.add_argument('--n_batches', dest='n_batches', type=int,
default=50,
                   help='Number of batches running in parallel for each ks-test trial')
p.add_argument('--ks_trials', dest='ks_trials', type=int,
default=100,
help='Number of random maps to generate')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
help='Option to overwrite reference map file, '
                   'if it already exists')
p.add_argument('--test', dest='test',
default=False, action='store_true',
help='Option to run small test version')
args = p.parse_args()
if args.test:
args.ks_trials = 20
args.n_batches = 10000
args.chunksize = 100
# Define output directories
error = comp.paths.condor_data_dir + '/ks_test_{}/error'.format(args.config)
output = comp.paths.condor_data_dir + '/ks_test_{}/output'.format(args.config)
log = comp.paths.condor_scratch_dir + '/ks_test_{}/log'.format(args.config)
submit = comp.paths.condor_scratch_dir + '/ks_test_{}/submit'.format(args.config)
# Define path to executables
make_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'make_maps.py')
merge_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'merge_maps.py')
save_pvals_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'save_pvals.py')
# Create Dagman instance
dag_name = 'anisotropy_kstest_{}'.format(args.config)
if args.test:
dag_name += '_test'
dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)
# Create Job for saving ks-test p-values for each trial
save_pvals_name = 'save_pvals_{}'.format(args.config)
if args.low_energy:
save_pvals_name += '_lowenergy'
save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
save_pvals_infiles_0 = []
save_pvals_infiles_1 = []
dagman.add_job(save_pvals_job)
outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data',
'anisotropy', 'random_splits')
if args.test:
outdir = os.path.join(outdir, 'test')
for trial_num in range(args.ks_trials):
# Create map_maps jobs for this ks_trial
make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num)
if args.low_energy:
make_maps_name += '_lowenergy'
make_maps_job = pycondor.Job(make_maps_name, make_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
dagman.add_job(make_maps_job)
merge_maps_infiles_0 = []
merge_maps_infiles_1 = []
for batch_idx in range(args.n_batches):
if args.test and batch_idx > 2:
break
outfile_sample_1 = os.path.join(outdir,
'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
outfile_sample_0 = os.path.join(outdir,
'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
make_maps_arg_list = []
make_maps_arg_list.append('--config {}'.format(args.config))
make_maps_arg_list.append('--n_side {}'.format(args.n_side))
make_maps_arg_list.append('--chunksize {}'.format(args.chunksize))
make_maps_arg_list.append('--n_batches {}'.format(args.n_batches))
make_maps_arg_list.append('--batch_idx {}'.format(batch_idx))
make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0))
make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1))
make_maps_arg = ' '.join(make_maps_arg_list)
if args.low_energy:
make_maps_arg += ' --low_energy'
make_maps_job.add_arg(make_maps_arg)
# Add this outfile to the list of infiles for merge_maps_job
merge_maps_infiles_0.append(outfile_sample_0)
merge_maps_infiles_1.append(outfile_sample_1)
for sample_idx, input_file_list in enumerate([merge_maps_infiles_0,
merge_maps_infiles_1]):
merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx)
if args.low_energy:
merge_maps_name += '_lowenergy'
merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
# Ensure that make_maps_job completes before merge_maps_job begins
make_maps_job.add_child(merge_maps_job)
merge_maps_job.add_child(save_pvals_job)
dagman.add_job(merge_maps_job)
merge_infiles_str = ' '.join(input_file_list)
# Assemble merged output file path
merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num))
merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile)
merge_maps_job.add_arg(merge_maps_arg)
if sample_idx == 0:
save_pvals_infiles_0.append(merge_outfile)
else:
save_pvals_infiles_1.append(merge_outfile)
save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0)
save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1)
if args.low_energy:
outfile_basename = 'ks_test_dataframe_lowenergy.hdf'
else:
outfile_basename = 'ks_test_dataframe.hdf'
outfile = os.path.join(outdir, outfile_basename)
save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \
'--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile)
save_pvals_job.add_arg(save_pvals_arg)
dagman.build_submit(fancyname=True)
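    # Added note (assumption about the pycondor API, not from the original script):
    # build_submit() is expected to write the .submit/.dag files into the configured
    # submit directory and then hand the DAG to HTCondor in one step; calling
    # build() alone would allow inspecting the generated files before submission.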
| jrbourbeau/cr-composition | processing/legacy/anisotropy/random_trials/process_kstest.py | Python | mit | 7,627 | 0.002098 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
from numpy import array, shape
from pyspark import SparkContext
from pyspark.mllib._common import \
_dot, _get_unmangled_rdd, _get_unmangled_double_vector_rdd, \
_serialize_double_matrix, _deserialize_double_matrix, \
_serialize_double_vector, _deserialize_double_vector, \
_get_initial_weights, _serialize_rating, _regression_train_wrapper, \
_linear_predictor_typecheck, _get_unmangled_labeled_point_rdd
from pyspark.mllib.linalg import SparseVector
from pyspark.mllib.regression import LabeledPoint, LinearModel
from math import exp, log
class LogisticRegressionModel(LinearModel):
"""A linear binary classification model derived from logistic regression.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data))
>>> lrm.predict(array([1.0])) > 0
True
>>> lrm.predict(array([0.0])) <= 0
True
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data))
>>> lrm.predict(array([0.0, 1.0])) > 0
True
>>> lrm.predict(array([0.0, 0.0])) <= 0
True
>>> lrm.predict(SparseVector(2, {1: 1.0})) > 0
True
>>> lrm.predict(SparseVector(2, {1: 0.0})) <= 0
True
"""
def predict(self, x):
_linear_predictor_typecheck(x, self._coeff)
margin = _dot(x, self._coeff) + self._intercept
prob = 1/(1 + exp(-margin))
return 1 if prob > 0.5 else 0
class LogisticRegressionWithSGD(object):
@classmethod
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0, initialWeights=None):
"""Train a logistic regression model on the given data."""
sc = data.context
train_func = lambda d, i: sc._jvm.PythonMLLibAPI().trainLogisticRegressionModelWithSGD(
d._jrdd, iterations, step, miniBatchFraction, i)
return _regression_train_wrapper(sc, train_func, LogisticRegressionModel, data,
initialWeights)
class SVMModel(LinearModel):
"""A support vector machine.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(data))
>>> svm.predict(array([1.0])) > 0
True
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(sparse_data))
>>> svm.predict(SparseVector(2, {1: 1.0})) > 0
True
>>> svm.predict(SparseVector(2, {0: -1.0})) <= 0
True
"""
def predict(self, x):
_linear_predictor_typecheck(x, self._coeff)
margin = _dot(x, self._coeff) + self._intercept
return 1 if margin >= 0 else 0
class SVMWithSGD(object):
@classmethod
def train(cls, data, iterations=100, step=1.0, regParam=1.0,
miniBatchFraction=1.0, initialWeights=None):
"""Train a support vector machine on the given data."""
sc = data.context
train_func = lambda d, i: sc._jvm.PythonMLLibAPI().trainSVMModelWithSGD(
d._jrdd, iterations, step, regParam, miniBatchFraction, i)
return _regression_train_wrapper(sc, train_func, SVMModel, data, initialWeights)
class NaiveBayesModel(object):
"""
Model for Naive Bayes classifiers.
Contains two parameters:
- pi: vector of logs of class priors (dimension C)
- theta: matrix of logs of class conditional probabilities (CxD)
>>> data = [
... LabeledPoint(0.0, [0.0, 0.0]),
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> model = NaiveBayes.train(sc.parallelize(data))
>>> model.predict(array([0.0, 1.0]))
0.0
>>> model.predict(array([1.0, 0.0]))
1.0
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
... LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
... ]
>>> model = NaiveBayes.train(sc.parallelize(sparse_data))
>>> model.predict(SparseVector(2, {1: 1.0}))
0.0
>>> model.predict(SparseVector(2, {0: 1.0}))
1.0
"""
def __init__(self, labels, pi, theta):
self.labels = labels
self.pi = pi
self.theta = theta
def predict(self, x):
"""Return the most likely class for a data vector x"""
return self.labels[numpy.argmax(self.pi + _dot(x, self.theta.transpose()))]
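    # Added commentary (not in the original source): for multinomial naive Bayes
    # the rule above selects argmax_c [ log pi_c + sum_j x_j * log theta_{c, j} ],
    # i.e. the class whose log prior plus feature log-likelihood is largest; _dot
    # lets x be either a dense NumPy array or a SparseVector.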
class NaiveBayes(object):
@classmethod
def train(cls, data, lambda_=1.0):
"""
Train a Naive Bayes model given an RDD of (label, features) vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can
handle all kinds of discrete data. For example, by converting
documents into TF-IDF vectors, it can be used for document
classification. By making every vector a 0-1 vector, it can also be
used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
@param data: RDD of NumPy vectors, one per element, where the first
coordinate is the label and the rest is the feature vector
(e.g. a count vector).
@param lambda_: The smoothing parameter
"""
sc = data.context
dataBytes = _get_unmangled_labeled_point_rdd(data)
ans = sc._jvm.PythonMLLibAPI().trainNaiveBayes(dataBytes._jrdd, lambda_)
return NaiveBayesModel(
_deserialize_double_vector(ans[0]),
_deserialize_double_vector(ans[1]),
_deserialize_double_matrix(ans[2]))
def _test():
import doctest
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| adobe-research/spark-cluster-deployment | initial-deployment-puppet/modules/spark/files/spark/python/pyspark/mllib/classification.py | Python | apache-2.0 | 7,307 | 0.001505 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_python_2d_ns
----------------------------------
Tests for `python_2d_ns` module.
"""
import sys
import unittest
from python_2d_ns.python_2d_ns import *
class TestPython_2d_ns(unittest.TestCase):
    # test x, y coordinates generated by function IC_coor
    # assume 2 threads are used and rank == 1
    # y coordinate should be the same as in the serial code
def test_IC_coor_y_coor(self):
x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
self.assertTrue(y[3,0]==-32)
self.assertTrue(y[3,5]==-27)
#x coordinate for rank 2 should start from 0
def test_IC_coor_x_coor(self):
x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
#this coordinate should be 0
self.assertTrue(x[0,2]==0)
    # test initial condition (Taylor-Green forcing): check that values are set at the expected wavenumbers
def test_IC_con(self):
#generate kx, ky, assume 2 threads, rank==0
x, y, kx, ky, k2, k2_exp=IC_coor(32, 32, 16, 1, 1, 0, 2)
Vxhat, Vyhat=IC_condition(1, 2, kx, ky, 32, 16)
#this wavenumber should be zero
self.assertTrue(Vyhat[2,5]==0)
#this wavenumber should be non-zero
self.assertTrue(Vxhat[14,14]==0.5j)
#test dealiasing function, which will remove values in wavenumber >= Nx/3
def test_delias(self):
#generate kx, ky, assume 2 threads, rank==1
Vxhat=zeros((Nx, Np), dtype=complex);
Vyhat=zeros((Nx, Np), dtype=complex);
Vxhat[:]=1
Vxhat, Vyhat=delias(Vxhat, Vyhat, Nx, Np, k2)
#this should be zero
self.assertTrue(Vxhat[Nx-1,Np-1]==0)
self.assertTrue(Vyhat[Nx-1,Np-1]==0)
    # test FFT and IFFT: taking the FFT and then the IFFT of an array should recover the original (up to numerical error)
def test_FFT(self):
testa=zeros((Np, Ny), dtype=float);
        testahat = empty((N, Np), dtype=complex)
if rank==0:
testa[2,0]=1
testa=ifftn_mpi(fftn_mpi(testa, testahat), testa)
#after FFT and IFFT, this value should be the same
if rank==0:
self.assertTrue(testa[2,0]-1<0.0001)
if __name__ == '__main__':
sys.exit(unittest.main())
| xinbian/2dns | tests/test_python_2d_ns.py | Python | mit | 2,191 | 0.043359 |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import ssl
import sys
import re
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import bytes_type
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take callbacks (since writing and reading are
non-blocking and asynchronous).
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=4096):
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
self.read_chunk_size = read_chunk_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_bytes = None
self._read_until_close = False
self._read_callback = None
self._streaming_callback = None
self._write_callback = None
self._close_callback = None
self._connect_callback = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information, or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback):
"""Run ``callback`` when we read the given regex pattern.
The callback will get the data read (including the data that
matched the regex and anything that came before it) as an argument.
"""
self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._try_inline_read()
def read_until(self, delimiter, callback):
"""Run ``callback`` when we read the given delimiter.
The callback will get the data read (including the delimiter)
as an argument.
"""
self._set_read_callback(callback)
self._read_delimiter = delimiter
self._try_inline_read()
def read_bytes(self, num_bytes, callback, streaming_callback=None):
"""Run callback when we read the given number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the argument to the final
``callback`` will be empty. Otherwise, the ``callback`` gets
the data as an argument.
"""
self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._streaming_callback = stack_context.wrap(streaming_callback)
self._try_inline_read()
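        # Usage sketch (added commentary, not in the original source): to stream a
        # large body to disk instead of buffering it all in memory, one could call
        #   stream.read_bytes(content_length, callback=on_done,
        #                     streaming_callback=out_file.write)
        # in which case on_done receives an empty byte string once all bytes arrive.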
def read_until_close(self, callback, streaming_callback=None):
"""Reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the argument to the final
``callback`` will be empty. Otherwise, the ``callback`` gets the
data as an argument.
Subject to ``max_buffer_size`` limit from `IOStream` constructor if
a ``streaming_callback`` is not used.
"""
self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_callback(self._streaming_callback,
self._consume(self._read_buffer_size))
self._run_callback(self._read_callback,
self._consume(self._read_buffer_size))
self._streaming_callback = None
self._read_callback = None
return
self._read_until_close = True
self._streaming_callback = stack_context.wrap(streaming_callback)
self._try_inline_read()
def write(self, data, callback=None):
"""Write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
"""
assert isinstance(data, bytes_type)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
if len(data) > WRITE_BUFFER_CHUNK_SIZE:
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
else:
self._write_buffer.append(data)
self._write_callback = stack_context.wrap(callback)
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed."""
self._close_callback = stack_context.wrap(callback)
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_callback(self._streaming_callback,
self._consume(self._read_buffer_size))
callback = self._read_callback
self._read_callback = None
self._read_until_close = False
self._run_callback(callback,
self._consume(self._read_buffer_size))
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
if (self.closed() and self._close_callback and
self._pending_callbacks == 0):
# if there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %d", fd)
return
try:
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
if self._connecting:
self._handle_connect()
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR:
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _handle_read(self):
try:
try:
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
                # establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
finally:
self._pending_callbacks -= 1
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if self._read_from_buffer():
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert not self._read_callback, "Already reading"
self._read_callback = stack_context.wrap(callback)
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
if self._read_from_buffer():
return
self._check_closed()
try:
try:
# See comments in _handle_read about incrementing _pending_callbacks
self._pending_callbacks += 1
while not self.closed():
if self._read_to_buffer() == 0:
break
finally:
self._pending_callbacks -= 1
except Exception:
            # If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if self._read_from_buffer():
return
self._maybe_add_error_listener()
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if e.args[0] == errno.ECONNRESET:
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size >= self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise IOError("Reached maximum read buffer size")
return len(chunk)
def _read_from_buffer(self):
"""Attempts to complete the currently-pending read from the buffer.
Returns True if the read was completed.
"""
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_callback(self._streaming_callback,
self._consume(bytes_to_consume))
if self._read_bytes is not None and self._read_buffer_size >= self._read_bytes:
num_bytes = self._read_bytes
callback = self._read_callback
self._read_callback = None
self._streaming_callback = None
self._read_bytes = None
self._run_callback(callback, self._consume(num_bytes))
return True
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
callback = self._read_callback
delimiter_len = len(self._read_delimiter)
self._read_callback = None
self._streaming_callback = None
self._read_delimiter = None
self._run_callback(callback,
self._consume(loc + delimiter_len))
return True
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
callback = self._read_callback
self._read_callback = None
self._streaming_callback = None
self._read_regex = None
self._run_callback(callback, self._consume(m.end()))
return True
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
return False
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
self._write_buffer_frozen = True
break
else:
if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
# Broken pipe errors are usually caused by connection
# reset, and its better to not log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %d: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer and self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
if self._state is None and self._pending_callbacks == 0:
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
If a sequence of fast-path ops do not end in a slow-path op,
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
                print(data)
stream.close()
tornado.ioloop.IOLoop.instance().stop()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket.fileno()
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>`,
i.e. a ``(host, port)`` tuple. If ``callback`` is specified,
it will be called when the connection is completed.
If specified, the ``server_hostname`` parameter will be used
in SSL connections for certificate validation (if requested in
the ``ssl_options``) and SNI (if supported; requires
Python 3.2+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
"""
self._connecting = True
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
gen_log.warning("Connect error on fd %d: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return
self._connect_callback = stack_context.wrap(callback)
self._add_io_state(self.io_loop.WRITE)
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
gen_log.warning("Connect error on fd %d: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno != errno.EINVAL:
raise
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be a dictionary
of keywords arguments for `ssl.wrap_socket`, or an `ssl.SSLContext`
object.
"""
self._ssl_options = kwargs.pop('ssl_options', {})
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %d %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
if err.args[0] in (errno.ECONNABORTED, errno.ECONNRESET):
return self.close(exc_info=True)
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
# Save the user's callback and run it after the ssl handshake
# has completed.
self._ssl_connect_callback = stack_context.wrap(callback)
self._server_hostname = server_hostname
super(SSLIOStream, self).connect(address, callback=None)
def _handle_connect(self):
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
super(SSLIOStream, self)._handle_connect()
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return None
elif e.args[0] == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
| Drvanon/Game | venv/lib/python3.3/site-packages/tornado/iostream.py | Python | apache-2.0 | 41,823 | 0.000287 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyYtopt(PythonPackage):
"""Ytopt package implements search using Random Forest (SuRF), an autotuning
    search method developed within the Y-Tune ECP project."""
maintainers = ['Kerilk']
homepage = "https://github.com/ytopt-team/ytopt"
url = "https://github.com/ytopt-team/ytopt/archive/refs/tags/v0.0.1.tar.gz"
version('0.0.2', sha256='5a624aa678b976ff6ef867610bafcb0dfd5c8af0d880138ca5d56d3f776e6d71')
version('0.0.1', sha256='3ca616922c8e76e73f695a5ddea5dd91b0103eada726185f008343cc5cbd7744')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-scikit-learn@0.23.1', type=('build', 'run'))
depends_on('py-dh-scikit-optimize', type=('build', 'run'))
depends_on('py-configspace', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-ytopt-autotune@1.1:', type=('build', 'run'))
depends_on('py-joblib', type=('build', 'run'))
depends_on('py-deap', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-ray', type=('build', 'run'))
depends_on('py-mpi4py@3.0.0:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-ytopt/package.py | Python | lgpl-2.1 | 1,360 | 0.003676 |
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="tickvalssrc",
parent_name="scatter3d.marker.colorbar",
**kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_tickvalssrc.py | Python | mit | 461 | 0 |
import sys
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class Visualizer():
def __init__(self, *args):
pass
def show_performance(self, list_of_tuples, fig_size=(9,9), font_scale=1.1, file=''):
"""
Parameters: list_of_tuples:
- list containing (clf_name, clf_performance) tuples for each
classifier we wish to visualize
fig_size:
- set figure size (default: (9,9))
font_scale:
- text scale in seaborn plots (default: 1.1)
file:
- string containing a valid filename (default: '')
Output: f: (matplotlib.pyplot.figure object)
"""
if not (isinstance(list_of_tuples, list) and isinstance(list_of_tuples[0], tuple)):
raise ValueError("Expecting a list of tuples")
sns.set(font_scale=font_scale)
sns.set_style("whitegrid")
data = list()
for name, value in list_of_tuples: data.append([name, value])
data = pd.DataFrame(data, columns=['classifier', 'performance'])
data.sort_values('performance', inplace=True, ascending=False)
"""
Close all figures (can close individual figure using plt.close(f)
where f is a matplotlib.pyplot.figure object)
"""
plt.close('all')
f = plt.figure(figsize=fig_size)
sns.barplot(x='performance', y='classifier', data=data)
plt.xlabel('performance')
if len(file)>1:
try:
plt.savefig(file)
except:
pass
return f
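# Editorial illustration, not part of the original module: a minimal sketch
# of calling show_performance() with (classifier, score) tuples as described
# in the docstring above. The classifier names and scores are made up.
#
#     viz = Visualizer()
#     fig = viz.show_performance([('logreg', 0.91), ('random_forest', 0.88)],
#                                fig_size=(6, 4), file='performance.png')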
if __name__ == '__main__':
    sys.exit(-1)
| magnusax/ml-meta-wrapper | gazer/visualize.py | Python | mit | 1,824 | 0.01261 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from .views import (AvailableMapListview, AvailableMapsDetailview,
index_view, MyArmiesListView, ArmyCreateView,
ArmyDetailView, RobotCreateView)
urlpatterns = patterns('',
url(r'maingame/maps$', AvailableMapListview.as_view(), name='list_available_maps'),
url(r'maingame/map/(?P<pk>\d+)$', AvailableMapsDetailview.as_view(), name="available_map_detail" ),
url(r'maingame/my_armies$', MyArmiesListView.as_view(), name='my_armies'),
url(r'maingame/army/(?P<pk>\d+)$', ArmyDetailView.as_view(), name="army_detail" ),
url(r'maingame/create_armies$', ArmyCreateView.as_view(), name='add_army'),
url(r'maingame/create_robot$', RobotCreateView.as_view(), name='add_robot_to_army'),
url(r'^$', index_view, name="index"),
)
| mrjmad/robotswars | maingame/urls.py | Python | mit | 868 | 0.009217 |
#!/usr/bin/env python
########################################################################
# File : dirac-version
# Author : Ricardo Graciani
########################################################################
"""
Print version of current DIRAC installation
Usage:
dirac-version [option]
Example:
$ dirac-version
"""
import argparse
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.parse_known_args()
print(DIRAC.version)
if __name__ == "__main__":
main()
| DIRACGrid/DIRAC | src/DIRAC/Core/scripts/dirac_version.py | Python | gpl-3.0 | 662 | 0 |
def is_palindrome(obj):
obj = str(obj)
obj_list = list(obj)
obj_list_reversed = obj_list[::-1]
return obj_list == obj_list_reversed
def generate_rotations(word):
letters = list(word)
string_rotations = []
counter = len(letters)
temp = letters
while counter != 0:
current_letter = temp.pop(0)
temp.append(current_letter)
word = "".join(temp)
string_rotations.append(word)
counter -= 1
return string_rotations
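# Editorial note, not part of the original script: generate_rotations("abc")
# returns ['bca', 'cab', 'abc'] -- each pass pops the first letter and
# appends it, so the last rotation produced is the original word itself.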
def get_rotated_palindromes(string_rotations):
is_empty = True
for word in string_rotations:
if is_palindrome(word) is True:
print(word)
is_empty = False
if is_empty is True:
print("NONE")
def main():
user_input = input("Enter a string: ")
string_rotations = generate_rotations(user_input)
get_rotated_palindromes(string_rotations)
if __name__ == '__main__':
main()
| pepincho/Python101-and-Algo1-Courses | Algo-1/Application/1-Palindromes.py | Python | mit | 937 | 0 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlite3
from flask import Flask, render_template, g, current_app, request
from flask.ext.paginate import Pagination
app = Flask(__name__)
app.config.from_pyfile('app.cfg')
@app.before_request
def before_request():
g.conn = sqlite3.connect('test.db')
g.conn.row_factory = sqlite3.Row
g.cur = g.conn.cursor()
@app.teardown_request
def teardown(error):
if hasattr(g, 'conn'):
g.conn.close()
@app.route('/')
def index():
g.cur.execute('select count(*) from users')
total = g.cur.fetchone()[0]
page, per_page, offset = get_page_items()
sql = 'select name from users order by name limit {}, {}'\
.format(offset, per_page)
g.cur.execute(sql)
users = g.cur.fetchall()
pagination = get_pagination(page=page,
per_page=per_page,
total=total,
record_name='users',
)
return render_template('index.html', users=users,
page=page,
per_page=per_page,
pagination=pagination,
)
def get_css_framework():
return current_app.config.get('CSS_FRAMEWORK', 'bootstrap3')
def get_link_size():
return current_app.config.get('LINK_SIZE', 'sm')
def show_single_page_or_not():
return current_app.config.get('SHOW_SINGLE_PAGE', False)
def get_page_items():
page = int(request.args.get('page', 1))
per_page = request.args.get('per_page')
if not per_page:
per_page = current_app.config.get('PER_PAGE', 10)
else:
per_page = int(per_page)
offset = (page - 1) * per_page
return page, per_page, offset
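# Editorial worked example, not part of the original app: for
# ?page=3&per_page=10 the offset is (3 - 1) * 10 = 20, so the LIMIT clause
# built in index() above becomes "limit 20, 10" and selects rows 21-30.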
def get_pagination(**kwargs):
kwargs.setdefault('record_name', 'records')
return Pagination(css_framework=get_css_framework(),
link_size=get_link_size(),
show_single_page=show_single_page_or_not(),
**kwargs
)
if __name__ == '__main__':
app.run(debug=True)
| wangjun/flask-paginate | example/app.py | Python | bsd-3-clause | 2,185 | 0.001373 |
from django.contrib.contenttypes.models import ContentType
import json
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from guardian.shortcuts import get_objects_for_user
from account.models import DepartmentGroup
from backend.tasks import TestConnectionTask
from event.models import NotificationPreferences
from .models import Application, Department, Environment, Server, ServerRole
from task.models import Execution
@login_required
def index(request):
data = {}
executions = Execution.objects.filter(task__application__department_id=request.current_department_id)
if not executions.count():
return redirect(reverse('first_steps_page'))
return render(request, 'page/index.html', data)
@permission_required('core.view_application', (Application, 'id', 'application_id'))
def application_page(request, application_id):
data = {}
data['application'] = get_object_or_404(Application, pk=application_id)
return render(request, 'page/application.html', data)
@permission_required('core.view_environment', (Environment, 'id', 'environment_id'))
def environment_page(request, environment_id):
data = {}
data['environment'] = get_object_or_404(Environment, pk=environment_id)
data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles'))
return render(request, 'page/environment.html', data)
@permission_required('core.view_environment', (Environment, 'servers__id', 'server_id'))
def server_test(request, server_id):
data = {}
data['server'] = get_object_or_404(Server, pk=server_id)
data['task_id'] = TestConnectionTask().delay(server_id).id
return render(request, 'partial/server_test.html', data)
@login_required
def server_test_ajax(request, task_id):
data = {}
task = TestConnectionTask().AsyncResult(task_id)
if task.status == 'SUCCESS':
status, output = task.get()
data['status'] = status
data['output'] = output
elif task.status == 'FAILED':
data['status'] = False
else:
data['status'] = None
return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def first_steps_page(request):
data = {}
return render(request, 'page/first_steps.html', data)
@login_required
def settings_page(request, section='user', subsection='profile'):
data = {}
data['section'] = section
data['subsection'] = subsection
data['department'] = Department(pk=request.current_department_id)
data['on_settings'] = True
handler = '_settings_%s_%s' % (section, subsection)
if section == 'system' and request.user.is_superuser is not True:
return redirect('index')
if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']):
return redirect('index')
if handler in globals():
data = globals()[handler](request, data)
else:
raise Http404
return render(request, 'page/settings.html', data)
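# Editorial note, not part of the original module: settings_page() dispatches
# by building a handler name, e.g. section='account', subsection='profile'
# resolves to _settings_account_profile below via globals().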
def _settings_account_profile(request, data):
data['subsection_template'] = 'partial/account_profile.html'
from account.forms import account_create_form
form = account_create_form('user_profile', request, request.user.id)
form.fields['email'].widget.attrs['readonly'] = True
data['form'] = form
if request.method == 'POST':
if form.is_valid():
form.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_password(request, data):
data['subsection_template'] = 'partial/account_password.html'
from account.forms import account_create_form
form = account_create_form('user_password', request, request.user.id)
data['form'] = form
if request.method == 'POST':
if form.is_valid():
user = form.save(commit=False)
user.set_password(user.password)
user.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_notifications(request, data):
data['subsection_template'] = 'partial/account_notifications.html'
data['applications'] = get_objects_for_user(request.user, 'core.view_application')
content_type = ContentType.objects.get_for_model(Application)
if request.method == 'POST':
for application in data['applications']:
key = 'notification[%s]' % application.id
notification, created = NotificationPreferences.objects.get_or_create(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type,
object_id=application.id)
if notification.is_active != (key in request.POST):
notification.is_active = key in request.POST
notification.save()
messages.success(request, 'Saved')
data['notifications'] = NotificationPreferences.objects.filter(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type.id).values_list('object_id', 'is_active')
data['notifications'] = dict(data['notifications'])
return data
def _settings_department_applications(request, data):
data['subsection_template'] = 'partial/application_list.html'
data['applications'] = Application.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['applications'].count())
return data
def _settings_department_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
from guardian.shortcuts import get_users_with_perms
department = Department.objects.get(pk=request.current_department_id)
data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name')
data['department_user_list'] = True
data['form_name'] = 'user'
return data
def _settings_department_groups(request, data):
data['subsection_template'] = 'partial/group_list.html'
data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id)
return data
def _settings_department_serverroles(request, data):
data['subsection_template'] = 'partial/serverrole_list.html'
data['serverroles'] = ServerRole.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['serverroles'].count())
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_departments(request, data):
data['subsection_template'] = 'partial/department_list.html'
data['departments'] = Department.objects.all()
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related('groups__departmentgroup__department').order_by('name')
data['form_name'] = 'usersystem'
return data
def department_switch(request, id):
department = get_object_or_404(Department, pk=id)
if request.user.has_perm('core.view_department', department):
request.session['current_department_id'] = int(id)
else:
messages.error(request, 'Access forbidden')
return redirect('index')
def handle_403(request):
messages.error(request, 'Access forbidden')
    return redirect('index')
| gunnery/gunnery | gunnery/core/views.py | Python | apache-2.0 | 7,964 | 0.001758 |
"""Support for Acmeda Roller Blind Batteries."""
from __future__ import annotations
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .base import AcmedaBase
from .const import ACMEDA_HUB_UPDATE, DOMAIN
from .helpers import async_add_acmeda_entities
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Acmeda Rollers from a config entry."""
hub = hass.data[DOMAIN][config_entry.entry_id]
current: set[int] = set()
@callback
def async_add_acmeda_sensors():
async_add_acmeda_entities(
hass, AcmedaBattery, config_entry, current, async_add_entities
)
hub.cleanup_callbacks.append(
async_dispatcher_connect(
hass,
ACMEDA_HUB_UPDATE.format(config_entry.entry_id),
async_add_acmeda_sensors,
)
)
class AcmedaBattery(AcmedaBase, SensorEntity):
"""Representation of a Acmeda cover device."""
device_class = SensorDeviceClass.BATTERY
_attr_native_unit_of_measurement = PERCENTAGE
@property
def name(self):
"""Return the name of roller."""
return f"{super().name} Battery"
@property
def native_value(self):
"""Return the state of the device."""
return self.roller.battery
| mezz64/home-assistant | homeassistant/components/acmeda/sensor.py | Python | apache-2.0 | 1,666 | 0 |
#!/usr/bin/env python
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3)The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Python setup script.
"""
from setuptools import setup, find_packages
def extract_requirements(filename):
with open(filename, 'r') as requirements_file:
return [x[:-1] for x in requirements_file.readlines()]
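# Editorial note, not part of the original file: x[:-1] strips the trailing
# newline from each requirement line, e.g. "requests==2.9.1\n" becomes
# "requests==2.9.1"; this assumes every line, including the last, ends
# with a newline.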
install_requires = extract_requirements('requirements.txt')
test_require = extract_requirements('test-requirements.txt')
setup(
name='etcdobj',
version='0.0.0',
description='Basic ORM for etcd',
author='Steve Milner',
url='https://github.com/ashcrow/etcdobj',
license="MBSD",
install_requires=install_requires,
tests_require=test_require,
package_dir={'': 'src'},
packages=find_packages('src'),
)
| ashcrow/etcdobj | setup.py | Python | bsd-3-clause | 2,107 | 0.000475 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
"""
Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI)
This plugin will forward authenticated REST API calls
to the PLUMgrid Network Management System called Director
"""
import netaddr
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.plumgrid.common import exceptions as plum_excep
from neutron.plugins.plumgrid.plumgrid_plugin import plugin_ver
LOG = logging.getLogger(__name__)
director_server_opts = [
cfg.StrOpt('director_server', default='localhost',
help=_("PLUMgrid Director server to connect to")),
cfg.StrOpt('director_server_port', default='8080',
help=_("PLUMgrid Director server port to connect to")),
cfg.StrOpt('username', default='username',
help=_("PLUMgrid Director admin username")),
cfg.StrOpt('password', default='password', secret=True,
help=_("PLUMgrid Director admin password")),
cfg.IntOpt('servertimeout', default=5,
help=_("PLUMgrid Director server timeout")),
cfg.StrOpt('driver',
default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib",
help=_("PLUMgrid Driver")), ]
cfg.CONF.register_opts(director_server_opts, "plumgriddirector")
class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2,
portbindings_db.PortBindingMixin,
external_net_db.External_net_db_mixin,
l3_db.L3_NAT_db_mixin):
supported_extension_aliases = ["external-net", "router", "binding",
"quotas", "provider"]
binding_view = "extension:port_binding:view"
binding_set = "extension:port_binding:set"
def __init__(self):
LOG.info(_('Neutron PLUMgrid Director: Starting Plugin'))
super(NeutronPluginPLUMgridV2, self).__init__()
self.plumgrid_init()
LOG.debug(_('Neutron PLUMgrid Director: Neutron server with '
'PLUMgrid Plugin has started'))
def plumgrid_init(self):
"""PLUMgrid initialization."""
director_plumgrid = cfg.CONF.plumgriddirector.director_server
director_port = cfg.CONF.plumgriddirector.director_server_port
director_admin = cfg.CONF.plumgriddirector.username
director_password = cfg.CONF.plumgriddirector.password
timeout = cfg.CONF.plumgriddirector.servertimeout
plum_driver = cfg.CONF.plumgriddirector.driver
# PLUMgrid Director info validation
LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid)
self._plumlib = importutils.import_object(plum_driver)
self._plumlib.director_conn(director_plumgrid, director_port, timeout,
director_admin, director_password)
def create_network(self, context, network):
"""Create Neutron network.
Creates a PLUMgrid-based bridge.
"""
LOG.debug(_('Neutron PLUMgrid Director: create_network() called'))
# Plugin DB - Network Create and validation
tenant_id = self._get_tenant_id_for_create(context,
network["network"])
self._network_admin_state(network)
with context.session.begin(subtransactions=True):
net_db = super(NeutronPluginPLUMgridV2,
self).create_network(context, network)
# Propagate all L3 data into DB
self._process_l3_create(context, net_db, network['network'])
try:
LOG.debug(_('PLUMgrid Library: create_network() called'))
self._plumlib.create_network(tenant_id, net_db, network)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Return created network
return net_db
def update_network(self, context, net_id, network):
"""Update Neutron network.
Updates a PLUMgrid-based bridge.
"""
LOG.debug(_("Neutron PLUMgrid Director: update_network() called"))
self._network_admin_state(network)
tenant_id = self._get_tenant_id_for_create(context, network["network"])
with context.session.begin(subtransactions=True):
# Plugin DB - Network Update
net_db = super(
NeutronPluginPLUMgridV2, self).update_network(context,
net_id, network)
self._process_l3_update(context, net_db, network['network'])
try:
LOG.debug(_("PLUMgrid Library: update_network() called"))
self._plumlib.update_network(tenant_id, net_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Return updated network
return net_db
def delete_network(self, context, net_id):
"""Delete Neutron network.
Deletes a PLUMgrid-based bridge.
"""
LOG.debug(_("Neutron PLUMgrid Director: delete_network() called"))
net_db = super(NeutronPluginPLUMgridV2,
self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
# Plugin DB - Network Delete
super(NeutronPluginPLUMgridV2, self).delete_network(context,
net_id)
try:
LOG.debug(_("PLUMgrid Library: update_network() called"))
self._plumlib.delete_network(net_db, net_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
def create_port(self, context, port):
"""Create Neutron port.
Creates a PLUMgrid-based port on the specific Virtual Network
Function (VNF).
"""
LOG.debug(_("Neutron PLUMgrid Director: create_port() called"))
        # Port operations on the PLUMgrid Director are handled automatically
        # by the VIF driver operations in Nova.
# It requires admin_state_up to be True
port["port"]["admin_state_up"] = True
with context.session.begin(subtransactions=True):
# Plugin DB - Port Create and Return port
port_db = super(NeutronPluginPLUMgridV2, self).create_port(context,
port)
device_id = port_db["device_id"]
if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
router_db = self._get_router(context, device_id)
else:
router_db = None
try:
LOG.debug(_("PLUMgrid Library: create_port() called"))
self._plumlib.create_port(port_db, router_db)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Plugin DB - Port Create and Return port
return self._port_viftype_binding(context, port_db)
def update_port(self, context, port_id, port):
"""Update Neutron port.
Updates a PLUMgrid-based port on the specific Virtual Network
Function (VNF).
"""
LOG.debug(_("Neutron PLUMgrid Director: update_port() called"))
with context.session.begin(subtransactions=True):
# Plugin DB - Port Create and Return port
port_db = super(NeutronPluginPLUMgridV2, self).update_port(
context, port_id, port)
device_id = port_db["device_id"]
if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
router_db = self._get_router(context, device_id)
else:
router_db = None
try:
LOG.debug(_("PLUMgrid Library: create_port() called"))
self._plumlib.update_port(port_db, router_db)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Plugin DB - Port Update
return self._port_viftype_binding(context, port_db)
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete Neutron port.
Deletes a PLUMgrid-based port on the specific Virtual Network
Function (VNF).
"""
LOG.debug(_("Neutron PLUMgrid Director: delete_port() called"))
with context.session.begin(subtransactions=True):
# Plugin DB - Port Create and Return port
port_db = super(NeutronPluginPLUMgridV2,
self).get_port(context, port_id)
self.disassociate_floatingips(context, port_id)
super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id)
if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
device_id = port_db["device_id"]
router_db = self._get_router(context, device_id)
else:
router_db = None
try:
LOG.debug(_("PLUMgrid Library: delete_port() called"))
self._plumlib.delete_port(port_db, router_db)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port_db = super(NeutronPluginPLUMgridV2,
self).get_port(context, id, fields)
self._port_viftype_binding(context, port_db)
return self._fields(port_db, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports_db = super(NeutronPluginPLUMgridV2,
self).get_ports(context, filters, fields)
for port_db in ports_db:
self._port_viftype_binding(context, port_db)
return [self._fields(port, fields) for port in ports_db]
def create_subnet(self, context, subnet):
"""Create Neutron subnet.
Creates a PLUMgrid-based DHCP and NAT Virtual Network
Functions (VNFs).
"""
LOG.debug(_("Neutron PLUMgrid Director: create_subnet() called"))
with context.session.begin(subtransactions=True):
# Plugin DB - Subnet Create
net_db = super(NeutronPluginPLUMgridV2, self).get_network(
context, subnet['subnet']['network_id'], fields=None)
s = subnet['subnet']
ipnet = netaddr.IPNetwork(s['cidr'])
            # PLUMgrid Director reserves the last IP address for the GW
            # when it is not defined
if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED:
gw_ip = str(netaddr.IPAddress(ipnet.last - 1))
subnet['subnet']['gateway_ip'] = gw_ip
# PLUMgrid reserves the first IP
if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED:
allocation_pool = self._allocate_pools_for_subnet(context, s)
subnet['subnet']['allocation_pools'] = allocation_pool
sub_db = super(NeutronPluginPLUMgridV2, self).create_subnet(
context, subnet)
try:
LOG.debug(_("PLUMgrid Library: create_subnet() called"))
self._plumlib.create_subnet(sub_db, net_db, ipnet)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
return sub_db
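    # Editorial worked example, not part of the original plugin: for a subnet
    # created with cidr 10.0.0.0/24 and no gateway_ip, the code above assigns
    # str(netaddr.IPAddress(ipnet.last - 1)) == '10.0.0.254' as the gateway.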
def delete_subnet(self, context, subnet_id):
"""Delete subnet core Neutron API."""
LOG.debug(_("Neutron PLUMgrid Director: delete_subnet() called"))
# Collecting subnet info
sub_db = self._get_subnet(context, subnet_id)
tenant_id = self._get_tenant_id_for_create(context, subnet_id)
net_id = sub_db["network_id"]
net_db = self.get_network(context, net_id)
with context.session.begin(subtransactions=True):
# Plugin DB - Subnet Delete
super(NeutronPluginPLUMgridV2, self).delete_subnet(
context, subnet_id)
try:
LOG.debug(_("PLUMgrid Library: delete_subnet() called"))
self._plumlib.delete_subnet(tenant_id, net_db, net_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
def update_subnet(self, context, subnet_id, subnet):
"""Update subnet core Neutron API."""
LOG.debug(_("update_subnet() called"))
# Collecting subnet info
orig_sub_db = self._get_subnet(context, subnet_id)
with context.session.begin(subtransactions=True):
# Plugin DB - Subnet Update
new_sub_db = super(NeutronPluginPLUMgridV2,
self).update_subnet(context, subnet_id, subnet)
ipnet = netaddr.IPNetwork(new_sub_db['cidr'])
try:
# PLUMgrid Server does not support updating resources yet
LOG.debug(_("PLUMgrid Library: update_network() called"))
self._plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
return new_sub_db
def create_router(self, context, router):
"""
Create router extension Neutron API
"""
LOG.debug(_("Neutron PLUMgrid Director: create_router() called"))
tenant_id = self._get_tenant_id_for_create(context, router["router"])
with context.session.begin(subtransactions=True):
# Create router in DB
router_db = super(NeutronPluginPLUMgridV2,
self).create_router(context, router)
# Create router on the network controller
try:
# Add Router to VND
LOG.debug(_("PLUMgrid Library: create_router() called"))
self._plumlib.create_router(tenant_id, router_db)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Return created router
return router_db
def update_router(self, context, router_id, router):
LOG.debug(_("Neutron PLUMgrid Director: update_router() called"))
with context.session.begin(subtransactions=True):
router_db = super(NeutronPluginPLUMgridV2,
self).update_router(context, router_id, router)
try:
LOG.debug(_("PLUMgrid Library: update_router() called"))
self._plumlib.update_router(router_db, router_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Return updated router
return router_db
def delete_router(self, context, router_id):
LOG.debug(_("Neutron PLUMgrid Director: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
super(NeutronPluginPLUMgridV2, self).delete_router(context,
router_id)
try:
LOG.debug(_("PLUMgrid Library: delete_router() called"))
self._plumlib.delete_router(tenant_id, router_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("Neutron PLUMgrid Director: "
"add_router_interface() called"))
with context.session.begin(subtransactions=True):
# Validate args
router_db = self._get_router(context, router_id)
tenant_id = router_db['tenant_id']
# Create interface in DB
int_router = super(NeutronPluginPLUMgridV2,
self).add_router_interface(context,
router_id,
interface_info)
port_db = self._get_port(context, int_router['port_id'])
subnet_id = port_db["fixed_ips"][0]["subnet_id"]
subnet_db = super(NeutronPluginPLUMgridV2,
self)._get_subnet(context, subnet_id)
ipnet = netaddr.IPNetwork(subnet_db['cidr'])
# Create interface on the network controller
try:
LOG.debug(_("PLUMgrid Library: add_router_interface() called"))
self._plumlib.add_router_interface(tenant_id, router_id,
port_db, ipnet)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
return int_router
def remove_router_interface(self, context, router_id, int_info):
LOG.debug(_("Neutron PLUMgrid Director: "
"remove_router_interface() called"))
with context.session.begin(subtransactions=True):
# Validate args
router_db = self._get_router(context, router_id)
tenant_id = router_db['tenant_id']
if 'port_id' in int_info:
port = self._get_port(context, int_info['port_id'])
net_id = port['network_id']
elif 'subnet_id' in int_info:
subnet_id = int_info['subnet_id']
subnet = self._get_subnet(context, subnet_id)
net_id = subnet['network_id']
# Remove router in DB
del_int_router = super(NeutronPluginPLUMgridV2,
self).remove_router_interface(context,
router_id,
int_info)
try:
LOG.debug(_("PLUMgrid Library: "
"remove_router_interface() called"))
self._plumlib.remove_router_interface(tenant_id,
net_id, router_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
return del_int_router
def create_floatingip(self, context, floatingip):
LOG.debug(_("Neutron PLUMgrid Director: create_floatingip() called"))
with context.session.begin(subtransactions=True):
floating_ip = super(NeutronPluginPLUMgridV2,
self).create_floatingip(context, floatingip)
try:
LOG.debug(_("PLUMgrid Library: create_floatingip() called"))
self._plumlib.create_floatingip(floating_ip)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
return floating_ip
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("Neutron PLUMgrid Director: update_floatingip() called"))
with context.session.begin(subtransactions=True):
floating_ip_orig = super(NeutronPluginPLUMgridV2,
self).get_floatingip(context, id)
floating_ip = super(NeutronPluginPLUMgridV2,
self).update_floatingip(context, id,
floatingip)
try:
LOG.debug(_("PLUMgrid Library: update_floatingip() called"))
self._plumlib.update_floatingip(floating_ip_orig, floating_ip,
id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
return floating_ip
def delete_floatingip(self, context, id):
LOG.debug(_("Neutron PLUMgrid Director: delete_floatingip() called"))
with context.session.begin(subtransactions=True):
floating_ip_orig = super(NeutronPluginPLUMgridV2,
self).get_floatingip(context, id)
super(NeutronPluginPLUMgridV2, self).delete_floatingip(context, id)
try:
LOG.debug(_("PLUMgrid Library: delete_floatingip() called"))
self._plumlib.delete_floatingip(floating_ip_orig, id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
def disassociate_floatingips(self, context, port_id):
LOG.debug(_("Neutron PLUMgrid Director: disassociate_floatingips() "
"called"))
try:
fip_qry = context.session.query(l3_db.FloatingIP)
floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one()
LOG.debug(_("PLUMgrid Library: disassociate_floatingips()"
" called"))
self._plumlib.disassociate_floatingips(floating_ip, port_id)
except sa_exc.NoResultFound:
pass
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
super(NeutronPluginPLUMgridV2,
self).disassociate_floatingips(context, port_id)
"""
    Internal PLUMgrid Functions
"""
def _get_plugin_version(self):
return plugin_ver.VERSION
def _port_viftype_binding(self, context, port):
port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_IOVISOR
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}
return port
def _network_admin_state(self, network):
if network["network"].get("admin_state_up") is False:
LOG.warning(_("Networks with admin_state_up=False are not "
"supported by PLUMgrid plugin yet."))
return network
def _allocate_pools_for_subnet(self, context, subnet):
"""Create IP allocation pools for a given subnet
Pools are defined by the 'allocation_pools' attribute,
a list of dict objects with 'start' and 'end' keys for
defining the pool range.
Modified from Neutron DB based class
"""
pools = []
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(subnet['cidr'])
first_ip = net.first + 2
last_ip = net.last - 1
gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last))
# Use the gw_ip to find a point for splitting allocation pools
# for this subnet
split_ip = min(max(gw_ip, net.first), net.last)
if split_ip > first_ip:
pools.append({'start': str(netaddr.IPAddress(first_ip)),
'end': str(netaddr.IPAddress(split_ip - 1))})
if split_ip < last_ip:
pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
'end': str(netaddr.IPAddress(last_ip))})
# return auto-generated pools
# no need to check for their validity
return pools
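    # Editorial worked example, not part of the original plugin: for cidr
    # 10.0.0.0/24 with gateway_ip 10.0.0.1, first_ip is 10.0.0.2, last_ip is
    # 10.0.0.254 and split_ip equals the gateway, so a single pool
    # [{'start': '10.0.0.2', 'end': '10.0.0.254'}] is returned.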
| subramani95/neutron | neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py | Python | apache-2.0 | 24,717 | 0 |
import copy
import logging
import os
import pytest
import salt.ext.tornado
import salt.ext.tornado.gen
import salt.ext.tornado.testing
import salt.minion
import salt.syspaths
import salt.utils.crypt
import salt.utils.event as event
import salt.utils.platform
import salt.utils.process
from salt._compat import ipaddress
from salt.exceptions import SaltClientError, SaltMasterUnresolvableError, SaltSystemExit
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
def test_minion_load_grains_false():
"""
Minion does not generate grains when load_grains is False
"""
opts = {"random_startup_delay": 0, "grains": {"foo": "bar"}}
with patch("salt.loader.grains") as grainsfunc:
minion = salt.minion.Minion(opts, load_grains=False)
assert minion.opts["grains"] == opts["grains"]
grainsfunc.assert_not_called()
def test_minion_load_grains_true():
"""
Minion generates grains when load_grains is True
"""
opts = {"random_startup_delay": 0, "grains": {}}
with patch("salt.loader.grains") as grainsfunc:
minion = salt.minion.Minion(opts, load_grains=True)
assert minion.opts["grains"] != {}
grainsfunc.assert_called()
def test_minion_load_grains_default():
"""
Minion load_grains defaults to True
"""
opts = {"random_startup_delay": 0, "grains": {}}
with patch("salt.loader.grains") as grainsfunc:
minion = salt.minion.Minion(opts)
assert minion.opts["grains"] != {}
grainsfunc.assert_called()
@pytest.mark.parametrize(
"event",
[
(
"fire_event",
lambda data, tag, cb=None, timeout=60: True,
),
(
"fire_event_async",
lambda data, tag, cb=None, timeout=60: salt.ext.tornado.gen.maybe_future(
True
),
),
],
)
def test_send_req_fires_completion_event(event):
event_enter = MagicMock()
event_enter.send.side_effect = event[1]
event = MagicMock()
event.__enter__.return_value = event_enter
with patch("salt.utils.event.get_event", return_value=event):
opts = salt.config.DEFAULT_MINION_OPTS.copy()
opts["random_startup_delay"] = 0
opts["return_retry_tries"] = 30
opts["grains"] = {}
with patch("salt.loader.grains"):
minion = salt.minion.Minion(opts)
load = {"load": "value"}
timeout = 60
if "async" in event[0]:
rtn = minion._send_req_async(load, timeout).result()
else:
rtn = minion._send_req_sync(load, timeout)
            # Get the fire_event calls and verify the tag and timeout arguments
for idx, call in enumerate(event.mock_calls, 1):
if "fire_event" in call[0]:
condition_event_tag = (
len(call.args) > 1
and call.args[1] == "__master_req_channel_payload"
)
condition_event_tag_error = "{} != {}; Call(number={}): {}".format(
idx, call, call.args[1], "__master_req_channel_payload"
)
condition_timeout = (
len(call.kwargs) == 1 and call.kwargs["timeout"] == timeout
)
condition_timeout_error = "{} != {}; Call(number={}): {}".format(
idx, call, call.kwargs["timeout"], timeout
)
fire_event_called = True
assert condition_event_tag, condition_event_tag_error
assert condition_timeout, condition_timeout_error
assert fire_event_called
assert rtn
@patch("salt.channel.client.ReqChannel.factory")
def test_mine_send_tries(req_channel_factory):
channel_enter = MagicMock()
channel_enter.send.side_effect = lambda load, timeout, tries: tries
channel = MagicMock()
channel.__enter__.return_value = channel_enter
req_channel_factory.return_value = channel
opts = {
"random_startup_delay": 0,
"grains": {},
"return_retry_tries": 20,
"minion_sign_messages": False,
}
with patch("salt.loader.grains"):
minion = salt.minion.Minion(opts)
minion.tok = "token"
data = {}
tag = "tag"
rtn = minion._mine_send(tag, data)
assert rtn == 20
def test_invalid_master_address():
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": float("127.0"),
"master_port": "4555",
"retry_dns": False,
},
):
pytest.raises(SaltSystemExit, salt.minion.resolve_dns, opts)
def test_source_int_name_local():
"""
test when file_client local and
source_interface_name is set
"""
interfaces = {
"bond0.1234": {
"hwaddr": "01:01:01:d0:d0:d0",
"up": True,
"inet": [
{
"broadcast": "111.1.111.255",
"netmask": "111.1.0.0",
"label": "bond0",
"address": "111.1.0.1",
}
],
}
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "127.0.0.1",
"master_port": "4555",
"file_client": "local",
"source_interface_name": "bond0.1234",
"source_ret_port": 49017,
"source_publish_port": 49018,
},
), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
assert salt.minion.resolve_dns(opts) == {
"master_ip": "127.0.0.1",
"source_ip": "111.1.0.1",
"source_ret_port": 49017,
"source_publish_port": 49018,
"master_uri": "tcp://127.0.0.1:4555",
}
@pytest.mark.slow_test
def test_source_int_name_remote():
"""
test when file_client remote and
source_interface_name is set and
interface is down
"""
interfaces = {
"bond0.1234": {
"hwaddr": "01:01:01:d0:d0:d0",
"up": False,
"inet": [
{
"broadcast": "111.1.111.255",
"netmask": "111.1.0.0",
"label": "bond0",
"address": "111.1.0.1",
}
],
}
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "127.0.0.1",
"master_port": "4555",
"file_client": "remote",
"source_interface_name": "bond0.1234",
"source_ret_port": 49017,
"source_publish_port": 49018,
},
), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
assert salt.minion.resolve_dns(opts) == {
"master_ip": "127.0.0.1",
"source_ret_port": 49017,
"source_publish_port": 49018,
"master_uri": "tcp://127.0.0.1:4555",
}
@pytest.mark.slow_test
def test_source_address():
"""
test when source_address is set
"""
interfaces = {
"bond0.1234": {
"hwaddr": "01:01:01:d0:d0:d0",
"up": False,
"inet": [
{
"broadcast": "111.1.111.255",
"netmask": "111.1.0.0",
"label": "bond0",
"address": "111.1.0.1",
}
],
}
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "127.0.0.1",
"master_port": "4555",
"file_client": "local",
"source_interface_name": "",
"source_address": "111.1.0.1",
"source_ret_port": 49017,
"source_publish_port": 49018,
},
), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
assert salt.minion.resolve_dns(opts) == {
"source_publish_port": 49018,
"source_ret_port": 49017,
"master_uri": "tcp://127.0.0.1:4555",
"source_ip": "111.1.0.1",
"master_ip": "127.0.0.1",
}
# Tests for _handle_decoded_payload in the salt.minion.Minion() class: 3
@pytest.mark.slow_test
def test_handle_decoded_payload_jid_match_in_jid_queue():
"""
Tests that the _handle_decoded_payload function returns when a jid is given that is already present
in the jid_queue.
Note: This test doesn't contain all of the patch decorators above the function like the other tests
for _handle_decoded_payload below. This is essential to this test as the call to the function must
return None BEFORE any of the processes are spun up because we should be avoiding firing duplicate
jobs.
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_data = {"fun": "foo.bar", "jid": 123}
mock_jid_queue = [123]
minion = salt.minion.Minion(
mock_opts,
jid_queue=copy.copy(mock_jid_queue),
io_loop=salt.ext.tornado.ioloop.IOLoop(),
)
try:
ret = minion._handle_decoded_payload(mock_data).result()
assert minion.jid_queue == mock_jid_queue
assert ret is None
finally:
minion.destroy()
@pytest.mark.slow_test
def test_handle_decoded_payload_jid_queue_addition():
"""
Tests that the _handle_decoded_payload function adds a jid to the minion's jid_queue when the new
jid isn't already present in the jid_queue.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
mock_jid = 11111
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_data = {"fun": "foo.bar", "jid": mock_jid}
mock_jid_queue = [123, 456]
minion = salt.minion.Minion(
mock_opts,
jid_queue=copy.copy(mock_jid_queue),
io_loop=salt.ext.tornado.ioloop.IOLoop(),
)
try:
# Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
# This can help debug any test failures if the _handle_decoded_payload call fails.
assert minion.jid_queue == mock_jid_queue
# Call the _handle_decoded_payload function and update the mock_jid_queue to include the new
# mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't
# previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal.
minion._handle_decoded_payload(mock_data).result()
mock_jid_queue.append(mock_jid)
assert minion.jid_queue == mock_jid_queue
finally:
minion.destroy()
@pytest.mark.slow_test
def test_handle_decoded_payload_jid_queue_reduced_minion_jid_queue_hwm():
"""
Tests that the _handle_decoded_payload function removes a jid from the minion's jid_queue when the
minion's jid_queue high water mark (minion_jid_queue_hwm) is hit.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["minion_jid_queue_hwm"] = 2
mock_data = {"fun": "foo.bar", "jid": 789}
mock_jid_queue = [123, 456]
minion = salt.minion.Minion(
mock_opts,
jid_queue=copy.copy(mock_jid_queue),
io_loop=salt.ext.tornado.ioloop.IOLoop(),
)
try:
# Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
# This can help debug any test failures if the _handle_decoded_payload call fails.
assert minion.jid_queue == mock_jid_queue
# Call the _handle_decoded_payload function and check that the queue is smaller by one item
# and contains the new jid
minion._handle_decoded_payload(mock_data).result()
assert len(minion.jid_queue) == 2
assert minion.jid_queue == [456, 789]
finally:
minion.destroy()
@pytest.mark.slow_test
def test_process_count_max():
"""
Tests that the _handle_decoded_payload function does not spawn more than the configured amount of processes,
as per process_count_max.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
), patch(
"salt.utils.minion.running", MagicMock(return_value=[])
), patch(
"salt.ext.tornado.gen.sleep",
MagicMock(return_value=salt.ext.tornado.concurrent.Future()),
):
process_count_max = 10
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["__role"] = "minion"
mock_opts["minion_jid_queue_hwm"] = 100
mock_opts["process_count_max"] = process_count_max
io_loop = salt.ext.tornado.ioloop.IOLoop()
minion = salt.minion.Minion(mock_opts, jid_queue=[], io_loop=io_loop)
try:
# mock gen.sleep to throw a special Exception when called, so that we detect it
class SleepCalledException(Exception):
"""Thrown when sleep is called"""
salt.ext.tornado.gen.sleep.return_value.set_exception(
SleepCalledException()
)
# up until process_count_max: gen.sleep does not get called, processes are started normally
for i in range(process_count_max):
mock_data = {"fun": "foo.bar", "jid": i}
io_loop.run_sync(
lambda data=mock_data: minion._handle_decoded_payload(data)
)
assert (
salt.utils.process.SignalHandlingProcess.start.call_count == i + 1
)
assert len(minion.jid_queue) == i + 1
salt.utils.minion.running.return_value += [i]
# above process_count_max: gen.sleep does get called, JIDs are created but no new processes are started
mock_data = {"fun": "foo.bar", "jid": process_count_max + 1}
pytest.raises(
SleepCalledException,
lambda: io_loop.run_sync(
lambda: minion._handle_decoded_payload(mock_data)
),
)
assert (
salt.utils.process.SignalHandlingProcess.start.call_count
== process_count_max
)
assert len(minion.jid_queue) == process_count_max + 1
finally:
minion.destroy()
@pytest.mark.slow_test
def test_beacons_before_connect():
"""
Tests that the 'beacons_before_connect' option causes the beacons to be initialized before connect.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.minion.Minion.sync_connect_master",
MagicMock(side_effect=RuntimeError("stop execution")),
), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["beacons_before_connect"] = True
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
try:
try:
minion.tune_in(start=True)
except RuntimeError:
pass
            # Make sure beacons are initialized but the scheduler is not
assert "beacons" in minion.periodic_callbacks
assert "schedule" not in minion.periodic_callbacks
finally:
minion.destroy()
@pytest.mark.slow_test
def test_scheduler_before_connect():
"""
Tests that the 'scheduler_before_connect' option causes the scheduler to be initialized before connect.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.minion.Minion.sync_connect_master",
MagicMock(side_effect=RuntimeError("stop execution")),
), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["scheduler_before_connect"] = True
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
try:
try:
minion.tune_in(start=True)
except RuntimeError:
pass
# Make sure the scheduler is initialized but the beacons are not
assert "schedule" in minion.periodic_callbacks
assert "beacons" not in minion.periodic_callbacks
finally:
minion.destroy()
def test_minion_module_refresh(tmp_path):
"""
    Tests that 'module_refresh' just returns when there is no 'schedule'
    because the destroy method was already called.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
try:
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["cachedir"] = str(tmp_path)
minion = salt.minion.Minion(
mock_opts,
io_loop=salt.ext.tornado.ioloop.IOLoop(),
)
minion.schedule = salt.utils.schedule.Schedule(mock_opts, {}, returners={})
assert hasattr(minion, "schedule")
minion.destroy()
assert not hasattr(minion, "schedule")
assert not minion.module_refresh()
finally:
minion.destroy()
def test_minion_module_refresh_beacons_refresh(tmp_path):
"""
Tests that 'module_refresh' calls beacons_refresh and that the
minion object has a beacons attribute with beacons.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
try:
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["cachedir"] = str(tmp_path)
minion = salt.minion.Minion(
mock_opts,
io_loop=salt.ext.tornado.ioloop.IOLoop(),
)
minion.schedule = salt.utils.schedule.Schedule(mock_opts, {}, returners={})
assert not hasattr(minion, "beacons")
minion.module_refresh()
assert hasattr(minion, "beacons")
assert hasattr(minion.beacons, "beacons")
assert "service.beacon" in minion.beacons.beacons
minion.destroy()
finally:
minion.destroy()
@pytest.mark.slow_test
def test_when_ping_interval_is_set_the_callback_should_be_added_to_periodic_callbacks():
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.minion.Minion.sync_connect_master",
MagicMock(side_effect=RuntimeError("stop execution")),
), patch(
"salt.utils.process.SignalHandlingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingProcess.join",
MagicMock(return_value=True),
):
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["ping_interval"] = 10
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
try:
try:
minion.connected = MagicMock(side_effect=(False, True))
minion._fire_master_minion_start = MagicMock()
minion.tune_in(start=False)
except RuntimeError:
pass
            # Make sure the ping callback was added to the periodic callbacks
assert "ping" in minion.periodic_callbacks
finally:
minion.destroy()
@pytest.mark.slow_test
def test_when_passed_start_event_grains():
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
# provide mock opts an os grain since we'll look for it later.
mock_opts["grains"]["os"] = "linux"
mock_opts["start_event_grains"] = ["os"]
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
try:
minion.tok = MagicMock()
minion._send_req_sync = MagicMock()
minion._fire_master(
"Minion has started", "minion_start", include_startup_grains=True
)
load = minion._send_req_sync.call_args[0][0]
assert "grains" in load
assert "os" in load["grains"]
finally:
minion.destroy()
@pytest.mark.slow_test
def test_when_not_passed_start_event_grains():
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
try:
minion.tok = MagicMock()
minion._send_req_sync = MagicMock()
minion._fire_master("Minion has started", "minion_start")
load = minion._send_req_sync.call_args[0][0]
assert "grains" not in load
finally:
minion.destroy()
@pytest.mark.slow_test
def test_when_other_events_fired_and_start_event_grains_are_set():
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["start_event_grains"] = ["os"]
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
try:
minion.tok = MagicMock()
minion._send_req_sync = MagicMock()
minion._fire_master("Custm_event_fired", "custom_event")
load = minion._send_req_sync.call_args[0][0]
assert "grains" not in load
finally:
minion.destroy()
@pytest.mark.slow_test
def test_minion_retry_dns_count():
"""
    Tests that resolve_dns will retry DNS lookups a maximum of
3 times before raising a SaltMasterUnresolvableError exception.
"""
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "dummy",
"master_port": "4555",
"retry_dns": 1,
"retry_dns_count": 3,
},
):
pytest.raises(SaltMasterUnresolvableError, salt.minion.resolve_dns, opts)
@pytest.mark.slow_test
def test_gen_modules_executors():
"""
Ensure gen_modules is called with the correct arguments #54429
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
class MockPillarCompiler:
def compile_pillar(self):
return {}
try:
with patch("salt.pillar.get_pillar", return_value=MockPillarCompiler()):
with patch("salt.loader.executors") as execmock:
minion.gen_modules()
                execmock.assert_called_with(minion.opts, minion.functions)
finally:
minion.destroy()
@patch("salt.utils.process.default_signals")
@pytest.mark.slow_test
def test_reinit_crypto_on_fork(def_mock):
"""
Ensure salt.utils.crypt.reinit_crypto() is executed when forking for new job
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["multiprocessing"] = True
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
job_data = {"jid": "test-jid", "fun": "test.ping"}
def mock_start(self):
# pylint: disable=comparison-with-callable
assert (
len(
[
x
for x in self._after_fork_methods
if x[0] == salt.utils.crypt.reinit_crypto
]
)
== 1
)
# pylint: enable=comparison-with-callable
with patch.object(salt.utils.process.SignalHandlingProcess, "start", mock_start):
io_loop.run_sync(lambda: minion._handle_decoded_payload(job_data))
def test_minion_manage_schedule():
"""
Tests that the manage_schedule will call the add function, adding
schedule data into opts.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.minion.Minion.sync_connect_master",
MagicMock(side_effect=RuntimeError("stop execution")),
), patch(
"salt.utils.process.SignalHandlingMultiprocessingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingMultiprocessingProcess.join",
MagicMock(return_value=True),
):
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
with patch("salt.utils.schedule.clean_proc_dir", MagicMock(return_value=None)):
try:
mock_functions = {"test.ping": None}
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
minion.schedule = salt.utils.schedule.Schedule(
mock_opts,
mock_functions,
returners={},
new_instance=True,
)
minion.opts["foo"] = "bar"
schedule_data = {
"test_job": {
"function": "test.ping",
"return_job": False,
"jid_include": True,
"maxrunning": 2,
"seconds": 10,
}
}
data = {
"name": "test-item",
"schedule": schedule_data,
"func": "add",
"persist": False,
}
tag = "manage_schedule"
minion.manage_schedule(tag, data)
assert "test_job" in minion.opts["schedule"]
finally:
del minion.schedule
minion.destroy()
del minion
def test_minion_manage_beacons():
"""
Tests that the manage_beacons will call the add function, adding
beacon data into opts.
"""
with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
"salt.minion.Minion.sync_connect_master",
MagicMock(side_effect=RuntimeError("stop execution")),
), patch(
"salt.utils.process.SignalHandlingMultiprocessingProcess.start",
MagicMock(return_value=True),
), patch(
"salt.utils.process.SignalHandlingMultiprocessingProcess.join",
MagicMock(return_value=True),
):
try:
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["beacons"] = {}
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
mock_functions = {"test.ping": None}
minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
minion.beacons = salt.beacons.Beacon(mock_opts, mock_functions)
bdata = [{"salt-master": "stopped"}, {"apache2": "stopped"}]
data = {"name": "ps", "beacon_data": bdata, "func": "add"}
tag = "manage_beacons"
log.debug("==== minion.opts %s ====", minion.opts)
minion.manage_beacons(tag, data)
assert "ps" in minion.opts["beacons"]
assert minion.opts["beacons"]["ps"] == bdata
finally:
minion.destroy()
def test_prep_ip_port():
_ip = ipaddress.ip_address
opts = {"master": "10.10.0.3", "master_uri_format": "ip_only"}
ret = salt.minion.prep_ip_port(opts)
assert ret == {"master": _ip("10.10.0.3")}
opts = {
"master": "10.10.0.3",
"master_port": 1234,
"master_uri_format": "default",
}
ret = salt.minion.prep_ip_port(opts)
assert ret == {"master": "10.10.0.3"}
opts = {"master": "10.10.0.3:1234", "master_uri_format": "default"}
ret = salt.minion.prep_ip_port(opts)
assert ret == {"master": "10.10.0.3", "master_port": 1234}
opts = {"master": "host name", "master_uri_format": "default"}
pytest.raises(SaltClientError, salt.minion.prep_ip_port, opts)
opts = {"master": "10.10.0.3:abcd", "master_uri_format": "default"}
pytest.raises(SaltClientError, salt.minion.prep_ip_port, opts)
opts = {"master": "10.10.0.3::1234", "master_uri_format": "default"}
pytest.raises(SaltClientError, salt.minion.prep_ip_port, opts)
@pytest.mark.skip_if_not_root
def test_sock_path_len():
"""
This tests whether or not a larger hash causes the sock path to exceed
the system's max sock path length. See the below link for more
information.
https://github.com/saltstack/salt/issues/12172#issuecomment-43903643
"""
opts = {
"id": "salt-testing",
"hash_type": "sha512",
"sock_dir": os.path.join(salt.syspaths.SOCK_DIR, "minion"),
"extension_modules": "",
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(opts, opts):
try:
event_publisher = event.AsyncEventPublisher(opts)
result = True
except ValueError:
            # There are rare cases where we operate on a closed socket, especially in containers.
# In this case, don't fail the test because we'll catch it down the road.
result = True
except SaltSystemExit:
result = False
assert result
@pytest.mark.skip_on_windows(reason="Skippin, no Salt master running on Windows.")
def test_master_type_failover():
"""
Tests master_type "failover" to not fall back to 127.0.0.1 address when master does not resolve in DNS
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts.update(
{
"master_type": "failover",
"master": ["master1", "master2"],
"__role": "",
"retry_dns": 0,
}
)
class MockPubChannel:
def connect(self):
raise SaltClientError("MockedChannel")
def close(self):
return
def mock_resolve_dns(opts, fallback=False):
assert not fallback
if opts["master"] == "master1":
raise SaltClientError("Cannot resolve {}".format(opts["master"]))
return {
"master_ip": "192.168.2.1",
"master_uri": "tcp://192.168.2.1:4505",
}
def mock_channel_factory(opts, **kwargs):
assert opts["master"] == "master2"
return MockPubChannel()
with patch("salt.minion.resolve_dns", mock_resolve_dns), patch(
"salt.channel.client.AsyncPubChannel.factory", mock_channel_factory
), patch("salt.loader.grains", MagicMock(return_value=[])):
with pytest.raises(SaltClientError):
minion = salt.minion.Minion(mock_opts)
yield minion.connect_master()
def test_master_type_failover_no_masters():
"""
Tests master_type "failover" to not fall back to 127.0.0.1 address when no master can be resolved
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts.update(
{
"master_type": "failover",
"master": ["master1", "master2"],
"__role": "",
"retry_dns": 0,
}
)
def mock_resolve_dns(opts, fallback=False):
assert not fallback
raise SaltClientError("Cannot resolve {}".format(opts["master"]))
with patch("salt.minion.resolve_dns", mock_resolve_dns), patch(
"salt.loader.grains", MagicMock(return_value=[])
):
with pytest.raises(SaltClientError):
minion = salt.minion.Minion(mock_opts)
yield minion.connect_master()
def test_config_cache_path_overrides():
cachedir = os.path.abspath("/path/to/master/cache")
opts = {"cachedir": cachedir, "conf_file": None}
mminion = salt.minion.MasterMinion(opts)
assert mminion.opts["cachedir"] == cachedir
def test_minion_grains_refresh_pre_exec_false():
"""
Minion does not refresh grains when grains_refresh_pre_exec is False
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["multiprocessing"] = False
mock_opts["grains_refresh_pre_exec"] = False
mock_data = {"fun": "foo.bar", "jid": 123}
with patch("salt.loader.grains") as grainsfunc, patch(
"salt.minion.Minion._target", MagicMock(return_value=True)
):
minion = salt.minion.Minion(
mock_opts,
jid_queue=None,
io_loop=salt.ext.tornado.ioloop.IOLoop(),
load_grains=False,
)
try:
ret = minion._handle_decoded_payload(mock_data).result()
grainsfunc.assert_not_called()
finally:
minion.destroy()
def test_minion_grains_refresh_pre_exec_true():
"""
Minion refreshes grains when grains_refresh_pre_exec is True
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["multiprocessing"] = False
mock_opts["grains_refresh_pre_exec"] = True
mock_data = {"fun": "foo.bar", "jid": 123}
with patch("salt.loader.grains") as grainsfunc, patch(
"salt.minion.Minion._target", MagicMock(return_value=True)
):
minion = salt.minion.Minion(
mock_opts,
jid_queue=None,
io_loop=salt.ext.tornado.ioloop.IOLoop(),
load_grains=False,
)
try:
ret = minion._handle_decoded_payload(mock_data).result()
grainsfunc.assert_called()
finally:
minion.destroy()
| saltstack/salt | tests/pytests/unit/test_minion.py | Python | apache-2.0 | 35,098 | 0.001197 |
#!/usr/bin/env python
import sys
# sys.dont_write_bytecode = True
import glob
import os
import time
import logging
import os.path
from argparse import ArgumentParser
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
def connect(self):
"""Convenience method that creates Server instance"""
from slackclient import SlackClient
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
while True:
for reply in self.slack_client.rtm_read():
self.input(reply)
self.crons()
self.output()
self.autoping()
time.sleep(.5)
def autoping(self):
# hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def input(self, data):
if "type" in data:
function_name = "process_" + data["type"]
logging.debug("got {}".format(function_name))
for plugin in self.bot_plugins:
plugin.register_jobs()
plugin.do(function_name, data)
def output(self):
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
if channel != None and output[1] != None:
if limiter == True:
time.sleep(.1)
limiter = False
message = output[1].encode('ascii', 'ignore')
channel.send_message("{}".format(message))
limiter = True
def crons(self):
for plugin in self.bot_plugins:
plugin.do_jobs()
def load_plugins(self):
for plugin in glob.glob(directory + '/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory + '/plugins/')
for plugin in glob.glob(directory + '/plugins/*.py') + glob.glob(
directory + '/plugins/*/*.py'):
logging.info(plugin)
name = plugin.split('/')[-1][:-3]
try:
self.bot_plugins.append(Plugin(name))
except:
import traceback
traceback_msg = traceback.format_exc()
logging.error("error loading plugin {name} {traceback_msg}".format(name=name, traceback_msg=traceback_msg))
class Plugin(object):
def __init__(self, name, plugin_config={}):
self.name = name
self.jobs = []
self.module = __import__(name)
self.register_jobs()
self.outputs = []
if name in config:
logging.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup()
def register_jobs(self):
if 'crontable' in dir(self.module):
for interval, function in self.module.crontable:
self.jobs.append(Job(interval, eval("self.module." + function)))
logging.info(self.module.crontable)
self.module.crontable = []
else:
self.module.crontable = []
def do(self, function_name, data):
if function_name in dir(self.module):
# this makes the plugin fail with stack trace in debug mode
if not debug:
try:
eval("self.module." + function_name)(data)
except:
logging.debug("problem in module {} {}".format(function_name, data))
else:
eval("self.module." + function_name)(data)
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
logging.debug("problem in catch all")
def do_jobs(self):
for job in self.jobs:
job.check()
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
logging.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
class Job(object):
def __init__(self, interval, function):
self.function = function
self.interval = interval
self.lastrun = 0
def __str__(self):
return "{} {} {}".format(self.function, self.interval, self.lastrun)
def __repr__(self):
return self.__str__()
def check(self):
if self.lastrun + self.interval < time.time():
if not debug:
try:
self.function()
except:
logging.debug("problem")
else:
self.function()
self.lastrun = time.time()
pass
class UnknownChannel(Exception):
pass
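# A minimal sketch of a plugin module that the loader above would pick up from
# plugins/ (names and the channel are illustrative, not taken from this repository):
# it registers a cron job via `crontable`, queues messages through `outputs`, and
# handles incoming RTM events with a `process_<event type>` function.
#
#     crontable = [(60, "say_time")]  # call say_time() roughly every 60 seconds
#     outputs = []                    # list of [channel, message] pairs to send
#
#     def process_message(data):
#         # echo any channel message back to the channel it came from
#         outputs.append([data["channel"], "you said: " + data.get("text", "")])
#
#     def say_time():
#         import time
#         outputs.append(["general", "it is now {}".format(time.ctime())])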
def main_loop():
if "LOGFILE" in config:
logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO,
format='%(asctime)s %(message)s')
logging.info(directory)
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
logging.exception('OOPS')
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
if __name__ == "__main__":
try:
from config import Config
args = parse_args()
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = Config()
if os.path.exists('./rtmbot.conf'):
config.load_yaml(args.config or 'rtmbot.conf')
else:
config.load_os_environ_vars('FB__')
logging.basicConfig(stream=sys.stdout, filename='debug.log',
level=logging.DEBUG if config["DEBUG"] else logging.INFO)
        logging.info('Bot is starting')
token = config["SLACK_TOKEN"]
debug = config["DEBUG"]
bot = RtmBot(token)
site_plugins = []
files_currently_downloading = []
job_hash = {}
if config["DAEMON"] in ['True', True]:
import daemon
with daemon.DaemonContext():
main_loop()
else:
main_loop()
except:
import traceback
print traceback.format_exc()
| andela-kanyanwu/food-bot-review | rtmbot.py | Python | mit | 7,049 | 0.001844 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio mail sending utilities. send_email() is the main API function
people should be using; just check out its docstring.
"""
__revision__ = "$Id$"
import os
import re
import sys
from cStringIO import StringIO
from time import sleep
import smtplib
import socket
from email import Encoders
from email.Header import Header
from email.MIMEBase import MIMEBase
from email.MIMEImage import MIMEImage
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
from formatter import DumbWriter, AbstractFormatter
from invenio.access_control_config import CFG_TEMP_EMAIL_ADDRESS
from invenio.config import \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_SITE_LANG, \
CFG_SITE_NAME_INTL, \
CFG_SITE_NAME, \
CFG_SITE_ADMIN_EMAIL, \
CFG_MISCUTIL_SMTP_HOST, \
CFG_MISCUTIL_SMTP_PORT, \
CFG_VERSION, \
CFG_DEVEL_SITE
from invenio.errorlib import register_exception
from invenio.messages import wash_language, gettext_set_language
from invenio.miscutil_config import InvenioMiscUtilError
from invenio.textutils import guess_minimum_encoding
try:
from invenio.config import \
CFG_MISCUTIL_SMTP_USER,\
CFG_MISCUTIL_SMTP_PASS,\
CFG_MISCUTIL_SMTP_TLS
except ImportError:
CFG_MISCUTIL_SMTP_USER = ''
CFG_MISCUTIL_SMTP_PASS = ''
CFG_MISCUTIL_SMTP_TLS = False
def scheduled_send_email(fromaddr,
toaddr,
subject="",
content="",
header=None,
footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
user=None,
other_bibtasklet_arguments=None,
replytoaddr="",
bccaddr="",
):
"""
Like send_email, but send an email via the bibsched
infrastructure.
@param fromaddr: sender
@type fromaddr: string
@param toaddr: list of receivers
@type toaddr: string (comma separated) or list of strings
@param subject: the subject
@param content: the body of the message
@param header: optional header, otherwise default is used
@param footer: optional footer, otherwise default is used
@param copy_to_admin: set to 1 in order to send email the admins
@param attempt_times: try at least n times before giving up sending
@param attempt_sleeptime: number of seconds to sleep between two attempts
@param user: the user name to user when scheduling the bibtasklet. If
None, the sender will be used
@param other_bibtasklet_arguments: other arguments to append to the list
of arguments to the call of task_low_level_submission
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@return: the scheduled bibtasklet
"""
from invenio.bibtask import task_low_level_submission
if not isinstance(toaddr, (unicode, str)):
toaddr = ','.join(toaddr)
if not isinstance(replytoaddr, (unicode, str)):
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
if user is None:
user = fromaddr
if other_bibtasklet_arguments is None:
other_bibtasklet_arguments = []
else:
other_bibtasklet_arguments = list(other_bibtasklet_arguments)
if not header is None:
other_bibtasklet_arguments.extend(("-a", "header=%s" % header))
if not footer is None:
other_bibtasklet_arguments.extend(("-a", "footer=%s" % footer))
return task_low_level_submission(
"bibtasklet", user, "-T", "bst_send_email",
"-a", "fromaddr=%s" % fromaddr,
"-a", "toaddr=%s" % toaddr,
"-a", "replytoaddr=%s" % replytoaddr,
"-a", "subject=%s" % subject,
"-a", "content=%s" % content,
"-a", "copy_to_admin=%s" % copy_to_admin,
"-a", "attempt_times=%s" % attempt_times,
"-a", "attempt_sleeptime=%s" % attempt_sleeptime,
"-a", "bccaddr=%s" % bccaddr,
*other_bibtasklet_arguments)
def send_email(fromaddr,
toaddr,
subject="",
content="",
html_content='',
html_images=None,
header=None,
footer=None,
html_header=None,
html_footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
debug_level=0,
ln=CFG_SITE_LANG,
charset=None,
replytoaddr="",
attachments=None,
bccaddr="",
forward_failures_to_admin=True,
):
"""Send a forged email to TOADDR from FROMADDR with message created from subjet, content and possibly
header and footer.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
        receivers are separated by ','). BEWARE: If more than one recipient is given,
the receivers are put in BCC and To will be "Undisclosed.Recipients:".
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] header to add, None for the Default
@param footer: [string] footer to add, None for the Default
@param html_header: [string] header to add to the html part, None for the Default
@param html_footer: [string] footer to add to the html part, None for the Default
@param copy_to_admin: [int] if 1 add CFG_SITE_ADMIN_EMAIL in receivers
@param attempt_times: [int] number of tries
@param attempt_sleeptime: [int] seconds in between tries
@param debug_level: [int] debug level
@param ln: [string] invenio language
    @param charset: [string] the content charset. By default it is None, which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@param forward_failures_to_admin: [bool] prevents infinite recursion
in case of admin reporting,
when the problem is not in
the e-mail address format,
but rather in the network
If sending fails, try to send it ATTEMPT_TIMES, and wait for
ATTEMPT_SLEEPTIME seconds in between tries.
e.g.:
    send_email('foo.bar@cern.ch', 'bar.foo@cern.ch', 'Let\'s try!', 'check 1234', '<strong>check</strong> <em>1234</em><img src="cid:image1">', {'image1': '/tmp/quantum.jpg'})
@return: [bool]: True if email was sent okay, False if it was not.
"""
if html_images is None:
html_images = {}
if type(toaddr) is str:
toaddr = toaddr.strip().split(',')
toaddr = remove_temporary_emails(toaddr)
if type(bccaddr) is str:
bccaddr = bccaddr.strip().split(',')
usebcc = len(toaddr) > 1 # More than one address, let's use Bcc in place of To
if copy_to_admin:
if CFG_SITE_ADMIN_EMAIL not in toaddr:
toaddr.append(CFG_SITE_ADMIN_EMAIL)
if CFG_DEVEL_SITE: # if we are on a development site, we don't want to send external e-mails
content = """
--------------------------------------------------------------
This message would have been sent to the following recipients:
%s
--------------------------------------------------------------
%s""" % (toaddr, content)
toaddr = CFG_SITE_ADMIN_EMAIL
usebcc = False
body = forge_email(fromaddr, toaddr, subject, content, html_content,
html_images, usebcc, header, footer, html_header,
html_footer, ln, charset, replytoaddr, attachments,
bccaddr)
_ = gettext_set_language(CFG_SITE_LANG)
if attempt_times < 1 or not toaddr:
try:
raise InvenioMiscUtilError(_('The system is not attempting to send an email from %s, to %s, with body %s.') % (fromaddr, toaddr, body))
except InvenioMiscUtilError, exc:
register_exception()
# log('ERR_MISCUTIL_NOT_ATTEMPTING_SEND_EMAIL', fromaddr, toaddr, body)
return False
sent = False
failure_reason = ''
failure_details = ''
while not sent and attempt_times > 0:
try:
server = smtplib.SMTP(CFG_MISCUTIL_SMTP_HOST, CFG_MISCUTIL_SMTP_PORT)
if debug_level > 2:
server.set_debuglevel(1)
else:
server.set_debuglevel(0)
if CFG_MISCUTIL_SMTP_TLS:
server.ehlo()
server.starttls()
server.ehlo()
if CFG_MISCUTIL_SMTP_USER and CFG_MISCUTIL_SMTP_PASS:
server.login(CFG_MISCUTIL_SMTP_USER, CFG_MISCUTIL_SMTP_PASS)
if isinstance(toaddr, basestring):
toaddr = [toaddr]
server.sendmail(fromaddr, toaddr + bccaddr, body)
server.quit()
sent = True
except (smtplib.SMTPException, socket.error) as e:
failure_reason = type(e).__name__
failure_details = str(e)
register_exception()
if debug_level > 1:
try:
                    raise InvenioMiscUtilError(_('Error in connecting to the SMTP server, waiting %s seconds. Exception is %s, while sending email from %s to %s with body %s.') % (attempt_sleeptime, sys.exc_info()[0], fromaddr, toaddr, body))
except InvenioMiscUtilError, exc:
register_exception()
# log('ERR_MISCUTIL_CONNECTION_SMTP', attempt_sleeptime,
# sys.exc_info()[0], fromaddr, toaddr, body)
if not sent:
attempt_times -= 1
if attempt_times > 0: # sleep only if we shall retry again
sleep(attempt_sleeptime)
if not sent:
# report failure to the admin with the intended message, its
# sender and recipients
if forward_failures_to_admin:
# prepend '> ' to every line of the original message
quoted_body = '> ' + '> '.join(body.splitlines(True))
# define and fill in the report template
admin_report_subject = _('Error while sending an email: %s') % (subject)
admin_report_body = _("\nError while sending an email.\n"
"Reason: %s\n"
"Details: %s\n"
"Sender: \"%s\"\n"
"Recipient(s): \"%s\"\n\n"
"The content of the mail was as follows:\n"
"%s") % (failure_reason, failure_details,
fromaddr, ', '.join(toaddr),
quoted_body)
send_email(CFG_SITE_ADMIN_EMAIL, CFG_SITE_ADMIN_EMAIL,
admin_report_subject, admin_report_body,
forward_failures_to_admin=False)
try:
raise InvenioMiscUtilError(_('Error in sending email from %s to %s with body %s.') % (fromaddr, toaddr, body))
except InvenioMiscUtilError, exc:
register_exception()
# log('ERR_MISCUTIL_SENDING_EMAIL', fromaddr, toaddr, body)
return sent
def email_header(ln=CFG_SITE_LANG):
"""The header of the email
@param ln: language
@return: header as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard header
out = """%(hello)s
""" % {
'hello': _("Hello:")
}
return out
def email_html_header(ln=CFG_SITE_LANG):
"""The header of the email
@param ln: language
@return: header as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard header
out = """%(hello)s<br />
""" % {
'hello': _("Hello:")
}
return out
def email_footer(ln=CFG_SITE_LANG):
"""The footer of the email
@param ln: language
@return: footer as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard footer
out = """\n\n%(best_regards)s
--
%(sitename)s <%(siteurl)s>
%(need_intervention_please_contact)s <%(sitesupportemail)s>
""" % {
'sitename': CFG_SITE_NAME_INTL[ln],
'best_regards': _("Best regards"),
'siteurl': CFG_SITE_URL,
'need_intervention_please_contact': _("Need human intervention? Contact"),
'sitesupportemail': CFG_SITE_SUPPORT_EMAIL
}
return out
def email_html_footer(ln=CFG_SITE_LANG):
"""The html footer of the email
@param ln: language
@return: footer as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard footer
out = """<br /><br /><em>%(best_regards)s</em>
<hr />
<a href="%(siteurl)s"><strong>%(sitename)s</strong></a><br />
%(need_intervention_please_contact)s <a href="mailto:%(sitesupportemail)s">%(sitesupportemail)s</a>
""" % {
'sitename': CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
'best_regards': _("Best regards"),
'siteurl': CFG_SITE_URL,
'need_intervention_please_contact': _("Need human intervention? Contact"),
'sitesupportemail': CFG_SITE_SUPPORT_EMAIL
}
return out
def forge_email(fromaddr, toaddr, subject, content, html_content='',
html_images=None, usebcc=False, header=None, footer=None,
html_header=None, html_footer=None, ln=CFG_SITE_LANG,
charset=None, replytoaddr="", attachments=None, bccaddr=""):
"""Prepare email. Add header and footer if needed.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ',')
@param usebcc: [bool] True for using Bcc in place of To
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] None for the default header
@param footer: [string] None for the default footer
@param ln: language
    @param charset: [string] the content charset. By default it is None, which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@return: forged email as a string"""
if html_images is None:
html_images = {}
if header is None:
content = email_header(ln) + content
else:
content = header + content
if footer is None:
content += email_footer(ln)
else:
content += footer
if charset is None:
(content, content_charset) = guess_minimum_encoding(content)
else:
content_charset = charset
subject = get_mail_header(subject)
fromaddr = get_mail_header(fromaddr)
toaddr = get_mail_header(toaddr)
replytoaddr = get_mail_header(replytoaddr)
bccaddr = get_mail_header(bccaddr)
toaddr = remove_temporary_emails(toaddr)
if html_content:
if html_header is None:
html_content = email_html_header(ln) + html_content
else:
html_content = html_header + html_content
if html_footer is None:
html_content += email_html_footer(ln)
else:
html_content += html_footer
if charset is None:
(html_content, html_content_charset) = guess_minimum_encoding(html_content)
else:
html_content_charset = charset
msg_root = MIMEMultipart('alternative')
msg_root.preamble = 'This is a multi-part message in MIME format.'
msg_text = MIMEText(content, _charset=content_charset)
msg_root.attach(msg_text)
msg_text = MIMEText(html_content, 'html', _charset=html_content_charset)
if not html_images:
# No image? Attach the HTML to the root
msg_root.attach(msg_text)
else:
# Image(s)? Attach the HTML and image(s) as children of a
# "related" block
msg_related = MIMEMultipart('related')
msg_related.attach(msg_text)
for image_id, image_path in html_images.iteritems():
msg_image = MIMEImage(open(image_path, 'rb').read())
msg_image.add_header('Content-ID', '<%s>' % image_id)
msg_image.add_header('Content-Disposition', 'attachment', filename=os.path.split(image_path)[1])
msg_related.attach(msg_image)
msg_root.attach(msg_related)
else:
msg_root = MIMEText(content, _charset=content_charset)
if attachments:
from invenio.bibdocfile import _mimes, guess_format_from_url
old_msg_root = msg_root
msg_root = MIMEMultipart()
msg_root.attach(old_msg_root)
for attachment in attachments:
try:
if type(attachment) in (list, tuple):
attachment, mime = attachment
if mime is None:
## Automatic guessing of mimetype
mime = _mimes.guess_type(attachment)[0]
if mime is None:
ext = guess_format_from_url(attachment)
mime = _mimes.guess_type("foo" + ext)[0]
if not mime:
mime = 'application/octet-stream'
part = MIMEBase(*mime.split('/', 1))
part.set_payload(open(attachment, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attachment))
msg_root.attach(part)
except:
register_exception(alert_admin=True, prefix="Can't attach %s" % attachment)
msg_root['From'] = fromaddr
if replytoaddr:
msg_root['Reply-To'] = replytoaddr
if usebcc:
msg_root['Bcc'] = toaddr
msg_root['To'] = 'Undisclosed.Recipients:'
if bccaddr:
msg_root['Bcc'] += ",%s" % (bccaddr,)
else:
msg_root['To'] = toaddr
if bccaddr:
msg_root['Bcc'] = bccaddr
msg_root['Date'] = formatdate(localtime=True)
msg_root['Subject'] = subject
msg_root['User-Agent'] = 'Invenio %s at %s' % (CFG_VERSION, CFG_SITE_URL)
return msg_root.as_string()
RE_NEWLINES = re.compile(r'<br\s*/?>|</p>', re.I)
RE_SPACES = re.compile(r'\s+')
RE_HTML_TAGS = re.compile(r'<.+?>')
def email_strip_html(html_content):
"""Strip html tags from html_content, trying to respect formatting."""
html_content = RE_SPACES.sub(' ', html_content)
html_content = RE_NEWLINES.sub('\n', html_content)
html_content = RE_HTML_TAGS.sub('', html_content)
html_content = html_content.split('\n')
out = StringIO()
out_format = AbstractFormatter(DumbWriter(out))
for row in html_content:
out_format.add_flowing_data(row)
out_format.end_paragraph(1)
return out.getvalue()
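# Illustrative behaviour (approximate, since AbstractFormatter re-flows long lines):
# email_strip_html('<p>Hello <strong>world</strong></p>See you<br/>soon')
# returns roughly 'Hello world\n\nSee you\n\nsoon\n'.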
_RE_TEMPORARY_EMAIL = re.compile(CFG_TEMP_EMAIL_ADDRESS % r'.+?', re.I)
def remove_temporary_emails(emails):
"""
Removes the temporary emails (which are constructed randomly when user logs in
with an external authentication provider which doesn't supply an email
address) from an email list.
@param emails: email list (if string, then receivers are separated by ',')
@type emails: [str]|str
@rtype: list|str
"""
if type(emails) in (str, unicode):
emails = [email.strip() for email in emails.split(',') if email.strip()]
emails = [email for email in emails if not _RE_TEMPORARY_EMAIL.match(email)]
return ','.join(emails)
else:
return [email for email in emails if not _RE_TEMPORARY_EMAIL.match(email)]
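# Illustrative behaviour: a comma-separated string stays a string and a list stays
# a list (whitespace is only stripped in the string case), with any address that
# matches CFG_TEMP_EMAIL_ADDRESS dropped from the result:
# remove_temporary_emails('a@example.org, b@example.org') -> 'a@example.org,b@example.org'
# remove_temporary_emails(['a@example.org', 'b@example.org']) -> ['a@example.org', 'b@example.org']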
def get_mail_header(value):
"""
Return a MIME-compliant header-string. Will join lists of strings
into one string with comma (,) as separator.
"""
if not isinstance(value, basestring):
value = ','.join(value)
try:
value = value.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
value = Header(value, 'utf-8')
return value
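# Illustrative behaviour: lists are joined with commas, plain ASCII values are
# returned as byte strings, and non-ASCII values are wrapped in a Header object:
# get_mail_header(['a@example.org', 'b@example.org']) -> 'a@example.org,b@example.org'
# get_mail_header(u'J\xfcrgen <j@example.org>') -> Header(u'J\xfcrgen <j@example.org>', 'utf-8')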
| CERNDocumentServer/invenio | modules/miscutil/lib/mailutils.py | Python | gpl-2.0 | 22,698 | 0.001983 |
import os
from torch.utils.ffi import create_extension
sources = ["src/lib_cffi.cpp"]
headers = ["src/lib_cffi.h"]
extra_objects = ["src/bn.o"]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
"_ext",
headers=headers,
sources=sources,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects,
extra_compile_args=["-std=c++11"],
)
if __name__ == "__main__":
ffi.build()
| Diyago/Machine-Learning-scripts | DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/v2/modules/build.py | Python | apache-2.0 | 544 | 0 |
#!/usr/bin/env python
import gtk, sys, string
class Socket:
def __init__(self):
window = gtk.Window()
window.set_default_size(200, 200)
socket = gtk.Socket()
window.add(socket)
print "Socket ID:", socket.get_id()
if len(sys.argv) == 2:
socket.add_id(long(sys.argv[1]))
window.connect("destroy", gtk.main_quit)
socket.connect("plug-added", self.plugged_event)
window.show_all()
def plugged_event(self, widget):
print "A plug has been inserted."
Socket()
gtk.main()
| Programmica/pygtk-tutorial | examples/socket.py | Python | cc0-1.0 | 572 | 0.005245 |
"""Ensure videos emit proper events"""
import datetime
import json
from nose.plugins.attrib import attr
import ddt
from common.test.acceptance.tests.helpers import EventsTestMixin
from common.test.acceptance.tests.video.test_video_module import VideoBaseTest
from common.test.acceptance.pages.lms.video.video import _parse_time_str
from openedx.core.lib.tests.assertions.events import assert_event_matches, assert_events_equal
from opaque_keys.edx.keys import UsageKey, CourseKey
class VideoEventsTestMixin(EventsTestMixin, VideoBaseTest):
"""
Useful helper methods to test video player event emission.
"""
def assert_payload_contains_ids(self, video_event):
"""
Video events should all contain "id" and "code" attributes in their payload.
This function asserts that those fields are present and have correct values.
"""
video_descriptors = self.course_fixture.get_nested_xblocks(category='video')
video_desc = video_descriptors[0]
video_locator = UsageKey.from_string(video_desc.locator)
expected_event = {
'event': {
'id': video_locator.html_id(),
'code': '3_yD_cEKoCk'
}
}
self.assert_events_match([expected_event], [video_event])
def assert_valid_control_event_at_time(self, video_event, time_in_seconds):
"""
Video control events should contain valid ID fields and a valid "currentTime" field.
This function asserts that those fields are present and have correct values.
"""
current_time = json.loads(video_event['event'])['currentTime']
self.assertAlmostEqual(current_time, time_in_seconds, delta=1)
def assert_field_type(self, event_dict, field, field_type):
"""Assert that a particular `field` in the `event_dict` has a particular type"""
self.assertIn(field, event_dict, '{0} not found in the root of the event'.format(field))
self.assertTrue(
isinstance(event_dict[field], field_type),
'Expected "{key}" to be a "{field_type}", but it has the value "{value}" of type "{t}"'.format(
key=field,
value=event_dict[field],
t=type(event_dict[field]),
field_type=field_type,
)
)
class VideoEventsTest(VideoEventsTestMixin):
""" Test video player event emission """
def test_video_control_events(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
And I play the video
And I watch 5 seconds of it
And I pause the video
Then a "load_video" event is emitted
And a "play_video" event is emitted
And a "pause_video" event is emitted
"""
def is_video_event(event):
"""Filter out anything other than the video events of interest"""
return event['event_type'] in ('load_video', 'play_video', 'pause_video')
captured_events = []
with self.capture_events(is_video_event, number_of_matches=3, captured_events=captured_events):
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:05')
self.video.click_player_button('pause')
for idx, video_event in enumerate(captured_events):
self.assert_payload_contains_ids(video_event)
if idx == 0:
assert_event_matches({'event_type': 'load_video'}, video_event)
elif idx == 1:
assert_event_matches({'event_type': 'play_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
elif idx == 2:
assert_event_matches({'event_type': 'pause_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, self.video.seconds)
def test_strict_event_format(self):
"""
This test makes a very strong assertion about the fields present in events. The goal of it is to ensure that new
fields are not added to all events mistakenly. It should be the only existing test that is updated when new top
level fields are added to all events.
"""
captured_events = []
with self.capture_events(lambda e: e['event_type'] == 'load_video', captured_events=captured_events):
self.navigate_to_video()
load_video_event = captured_events[0]
# Validate the event payload
self.assert_payload_contains_ids(load_video_event)
# We cannot predict the value of these fields so we make weaker assertions about them
dynamic_string_fields = (
'accept_language',
'agent',
'host',
'ip',
'event',
'session'
)
for field in dynamic_string_fields:
self.assert_field_type(load_video_event, field, basestring)
self.assertIn(field, load_video_event, '{0} not found in the root of the event'.format(field))
del load_video_event[field]
# A weak assertion for the timestamp as well
self.assert_field_type(load_video_event, 'time', datetime.datetime)
del load_video_event['time']
# Note that all unpredictable fields have been deleted from the event at this point
course_key = CourseKey.from_string(self.course_id)
static_fields_pattern = {
'context': {
'course_id': unicode(course_key),
'org_id': course_key.org,
'path': '/event',
'user_id': self.user_info['user_id']
},
'event_source': 'browser',
'event_type': 'load_video',
'username': self.user_info['username'],
'page': self.browser.current_url,
'referer': self.browser.current_url,
'name': 'load_video',
}
assert_events_equal(static_fields_pattern, load_video_event)
@attr(shard=8)
@ddt.ddt
class VideoBumperEventsTest(VideoEventsTestMixin):
""" Test bumper video event emission """
# helper methods
def watch_video_and_skip(self):
"""
Wait 5 seconds and press "skip" button.
"""
self.video.wait_for_position('0:05')
self.video.click_player_button('skip_bumper')
def watch_video_and_dismiss(self):
"""
Wait 5 seconds and press "do not show again" button.
"""
self.video.wait_for_position('0:05')
self.video.click_player_button('do_not_show_again')
def wait_for_state(self, state='finished'):
"""
Wait until video will be in given state.
Finished state means that video is played to the end.
"""
self.video.wait_for_state(state)
def add_bumper(self):
"""
Add video bumper to the course.
"""
additional_data = {
u'video_bumper': {
u'value': {
"transcripts": {},
"video_id": "video_001"
}
}
}
self.course_fixture.add_advanced_settings(additional_data)
@ddt.data(
('edx.video.bumper.skipped', watch_video_and_skip),
('edx.video.bumper.dismissed', watch_video_and_dismiss),
('edx.video.bumper.stopped', wait_for_state)
)
@ddt.unpack
def test_video_control_events(self, event_type, action):
"""
Scenario: Video component with pre-roll emits events correctly
Given the course has a Video component in "Youtube" mode with pre-roll enabled
And I click on the video poster
And the pre-roll video start playing
And I watch (5 seconds/5 seconds/to the end of) it
And I click (skip/do not show again) video button
Then a "edx.video.bumper.loaded" event is emitted
And a "edx.video.bumper.played" event is emitted
And a "edx.video.bumper.skipped/dismissed/stopped" event is emitted
And a "load_video" event is emitted
And a "play_video" event is emitted
"""
def is_video_event(event):
"""Filter out anything other than the video events of interest"""
return event['event_type'] in (
'edx.video.bumper.loaded',
'edx.video.bumper.played',
'edx.video.bumper.skipped',
'edx.video.bumper.dismissed',
'edx.video.bumper.stopped',
'load_video',
'play_video',
'pause_video'
) and self.video.state != 'buffering'
captured_events = []
self.add_bumper()
with self.capture_events(is_video_event, number_of_matches=5, captured_events=captured_events):
self.navigate_to_video_no_render()
self.video.click_on_poster()
self.video.wait_for_video_bumper_render()
sources, duration = self.video.sources[0], self.video.duration
action(self)
# Filter subsequent events that appear due to bufferisation: edx.video.bumper.played
        # As the bumper does not emit a pause event, we filter subsequent edx.video.bumper.played events
        # from the list, except the first.
filtered_events = []
for video_event in captured_events:
is_played_event = video_event['event_type'] == 'edx.video.bumper.played'
appears_again = filtered_events and video_event['event_type'] == filtered_events[-1]['event_type']
if is_played_event and appears_again:
continue
filtered_events.append(video_event)
for idx, video_event in enumerate(filtered_events):
if idx < 3:
self.assert_bumper_payload_contains_ids(video_event, sources, duration)
else:
self.assert_payload_contains_ids(video_event)
if idx == 0:
assert_event_matches({'event_type': 'edx.video.bumper.loaded'}, video_event)
elif idx == 1:
assert_event_matches({'event_type': 'edx.video.bumper.played'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
elif idx == 2:
assert_event_matches({'event_type': event_type}, video_event)
elif idx == 3:
assert_event_matches({'event_type': 'load_video'}, video_event)
elif idx == 4:
assert_event_matches({'event_type': 'play_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
def assert_bumper_payload_contains_ids(self, video_event, sources, duration):
"""
Bumper video events should all contain "host_component_id", "bumper_id",
"duration", "code" attributes in their payload.
This function asserts that those fields are present and have correct values.
"""
self.add_bumper()
video_descriptors = self.course_fixture.get_nested_xblocks(category='video')
video_desc = video_descriptors[0]
video_locator = UsageKey.from_string(video_desc.locator)
expected_event = {
'event': {
'host_component_id': video_locator.html_id(),
'bumper_id': sources,
'duration': _parse_time_str(duration),
'code': 'html5'
}
}
self.assert_events_match([expected_event], [video_event])
def test_strict_event_format(self):
"""
This test makes a very strong assertion about the fields present in events. The goal of it is to ensure that new
fields are not added to all events mistakenly. It should be the only existing test that is updated when new top
level fields are added to all events.
"""
captured_events = []
self.add_bumper()
filter_event = lambda e: e['event_type'] == 'edx.video.bumper.loaded'
with self.capture_events(filter_event, captured_events=captured_events):
self.navigate_to_video_no_render()
self.video.click_on_poster()
load_video_event = captured_events[0]
# Validate the event payload
sources, duration = self.video.sources[0], self.video.duration
self.assert_bumper_payload_contains_ids(load_video_event, sources, duration)
# We cannot predict the value of these fields so we make weaker assertions about them
dynamic_string_fields = (
'accept_language',
'agent',
'host',
'ip',
'event',
'session'
)
for field in dynamic_string_fields:
self.assert_field_type(load_video_event, field, basestring)
self.assertIn(field, load_video_event, '{0} not found in the root of the event'.format(field))
del load_video_event[field]
# A weak assertion for the timestamp as well
self.assert_field_type(load_video_event, 'time', datetime.datetime)
del load_video_event['time']
# Note that all unpredictable fields have been deleted from the event at this point
course_key = CourseKey.from_string(self.course_id)
static_fields_pattern = {
'context': {
'course_id': unicode(course_key),
'org_id': course_key.org,
'path': '/event',
'user_id': self.user_info['user_id']
},
'event_source': 'browser',
'event_type': 'edx.video.bumper.loaded',
'username': self.user_info['username'],
'page': self.browser.current_url,
'referer': self.browser.current_url,
'name': 'edx.video.bumper.loaded',
}
assert_events_equal(static_fields_pattern, load_video_event)
| louyihua/edx-platform | common/test/acceptance/tests/video/test_video_events.py | Python | agpl-3.0 | 13,984 | 0.002932 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/QT/up-to-date.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Validate that a stripped-down real-world Qt configuration (thanks
to Leanid Nazdrynau) with a generated .h file is correctly
up-to-date after a build.
(This catches a bug that was introduced during a signature refactoring
ca. September 2005.)
"""
import os
import TestSCons
_obj = TestSCons._obj
test = TestSCons.TestSCons()
if not os.environ.get('QTDIR', None):
x ="External environment variable $QTDIR not set; skipping test(s).\n"
test.skip_test(x)
test.subdir('layer',
['layer', 'aclock'],
['layer', 'aclock', 'qt_bug'])
test.write('SConstruct', """\
import os
aa=os.getcwd()
env=Environment(tools=['default','expheaders','qt'],toolpath=[aa])
env["EXP_HEADER_ABS"]=os.path.join(os.getcwd(),'include')
if not os.access(env["EXP_HEADER_ABS"],os.F_OK):
os.mkdir (env["EXP_HEADER_ABS"])
Export('env')
env.SConscript('layer/aclock/qt_bug/SConscript')
""")
test.write('expheaders.py', """\
import SCons.Defaults
def ExpHeaderScanner(node, env, path):
return []
def generate(env):
HeaderAction=SCons.Action.Action([SCons.Defaults.Copy('$TARGET','$SOURCE'),SCons.Defaults.Chmod('$TARGET',0755)])
HeaderBuilder= SCons.Builder.Builder(action=HeaderAction)
env['BUILDERS']['ExportHeaders'] = HeaderBuilder
def exists(env):
return 0
""")
test.write(['layer', 'aclock', 'qt_bug', 'SConscript'], """\
import os
Import ("env")
env.ExportHeaders(os.path.join(env["EXP_HEADER_ABS"],'main.h'), 'main.h')
env.ExportHeaders(os.path.join(env["EXP_HEADER_ABS"],'migraform.h'), 'migraform.h')
env.Append(CPPPATH=env["EXP_HEADER_ABS"])
env.StaticLibrary('all',['main.ui','migraform.ui','my.cc'])
""")
test.write(['layer', 'aclock', 'qt_bug', 'main.ui'], """\
<!DOCTYPE UI><UI version="3.3" stdsetdef="1">
<class>Main</class>
<widget class="QWizard">
<property name="name">
<cstring>Main</cstring>
</property>
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>600</width>
<height>385</height>
</rect>
</property>
</widget>
<includes>
<include location="local" impldecl="in implementation">migraform.h</include>
</includes>
</UI>
""")
test.write(['layer', 'aclock', 'qt_bug', 'migraform.ui'], """\
<!DOCTYPE UI><UI version="3.3" stdsetdef="1">
<class>MigrateForm</class>
<widget class="QWizard">
<property name="name">
<cstring>MigrateForm</cstring>
</property>
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>600</width>
<height>385</height>
</rect>
</property>
</widget>
</UI>
""")
test.write(['layer', 'aclock', 'qt_bug', 'my.cc'], """\
#include <main.h>
""")
my_obj = 'layer/aclock/qt_bug/my'+_obj
test.run(arguments = my_obj, stderr=None)
expect = my_obj.replace( '/', os.sep )
test.up_to_date(options = '--debug=explain',
arguments = (expect),
stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/QT/up-to-date.py | Python | mit | 4,303 | 0.002789 |
import stats_buffer
import util_cli as util
class BucketSummary:
def run(self, accessor):
return stats_buffer.bucket_info
class DGMRatio:
def run(self, accessor):
result = []
hdd_total = 0
ram_total = 0
for node, nodeinfo in stats_buffer.nodes.iteritems():
if nodeinfo["StorageInfo"].has_key("hdd"):
hdd_total += nodeinfo['StorageInfo']['hdd']['usedByData']
if nodeinfo["StorageInfo"].has_key("ram"):
ram_total += nodeinfo['StorageInfo']['ram']['usedByData']
if ram_total > 0:
ratio = hdd_total / ram_total
else:
ratio = 0
return ratio
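# DGMRatio above reports how deep the cluster is into disk-greater-than-memory
# territory: total disk space used by data divided by total RAM used by data,
# summed over all nodes. As an illustrative example, 40 GB on disk against 10 GB
# in RAM yields a ratio of 4.0, while a ratio around 1.0 or below suggests the
# data set still fits largely in memory.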
class ARRatio:
def run(self, accessor):
result = {}
cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
item_avg = {
"curr_items": [],
"vb_replica_curr_items": [],
}
num_error = []
for counter in accessor["counter"]:
values = stats_info[accessor["scale"]][counter]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
for node, vals in nodeStats.iteritems():
avg = sum(vals) / samplesCount
item_avg[counter].append((node, avg))
res = []
active_total = replica_total = 0
for active, replica in zip(item_avg['curr_items'], item_avg['vb_replica_curr_items']):
if replica[1] == 0:
res.append((active[0], "No replica"))
else:
ratio = 1.0 * active[1] / replica[1]
res.append((active[0], util.pretty_float(ratio)))
if ratio < accessor["threshold"]:
num_error.append({"node":active[0], "value": ratio})
active_total += active[1]
replica_total += replica[1]
if replica_total == 0:
res.append(("total", "no replica"))
else:
ratio = active_total * 1.0 / replica_total
cluster += ratio
res.append(("total", util.pretty_float(ratio)))
if ratio != accessor["threshold"]:
num_error.append({"node":"total", "value": ratio})
if len(num_error) > 0:
res.append(("error", num_error))
result[bucket] = res
result["cluster"] = util.pretty_float(cluster / len(stats_buffer.buckets))
return result
class OpsRatio:
def run(self, accessor):
result = {}
for bucket, stats_info in stats_buffer.buckets.iteritems():
ops_avg = {
"cmd_get": [],
"cmd_set": [],
"delete_hits" : [],
}
for counter in accessor["counter"]:
values = stats_info[accessor["scale"]][counter]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
for node, vals in nodeStats.iteritems():
avg = sum(vals) / samplesCount
ops_avg[counter].append((node, avg))
res = []
read_total = write_total = del_total = 0
for read, write, delete in zip(ops_avg['cmd_get'], ops_avg['cmd_set'], ops_avg['delete_hits']):
count = read[1] + write[1] + delete[1]
if count == 0:
res.append((read[0], "0:0:0"))
else:
read_ratio = read[1] *100 / count
read_total += read_ratio
write_ratio = write[1] * 100 / count
write_total += write_ratio
del_ratio = delete[1] * 100 / count
del_total += del_ratio
res.append((read[0], "{0}:{1}:{2}".format(int(read_ratio+.5), int(write_ratio+.5), int(del_ratio+.5))))
read_total /= len(ops_avg['cmd_get'])
write_total /= len(ops_avg['cmd_set'])
del_total /= len(ops_avg['delete_hits'])
res.append(("total", "{0}:{1}:{2}".format(int(read_total+.5), int(write_total+.5), int(del_total+.5))))
result[bucket] = res
return result
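# Illustrative note (not part of the original source, hypothetical numbers): for per-node
# averages cmd_get=30, cmd_set=60, delete_hits=10 the count above is 100, so the row is
# formatted as "30:60:10"; the final "total" row averages those per-node percentages
# across all nodes of the bucket.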
class CacheMissRatio:
def run(self, accessor):
result = {}
cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
values = stats_info[accessor["scale"]][accessor["counter"]]
timestamps = values["timestamp"]
timestamps = [x - timestamps[0] for x in timestamps]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
trend = []
total = 0
data = []
num_error = []
for node, vals in nodeStats.iteritems():
#a, b = util.linreg(timestamps, vals)
value = sum(vals) / samplesCount
total += value
if value > accessor["threshold"]:
num_error.append({"node":node, "value":value})
trend.append((node, util.pretty_float(value)))
data.append(value)
total /= len(nodeStats)
trend.append(("total", util.pretty_float(total)))
trend.append(("variance", util.two_pass_variance(data)))
if len(num_error) > 0:
trend.append(("error", num_error))
cluster += total
result[bucket] = trend
if len(stats_buffer.buckets) > 0:
result["cluster"] = util.pretty_float(cluster / len(stats_buffer.buckets))
return result
class MemUsed:
def run(self, accessor):
result = {}
cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
values = stats_info[accessor["scale"]][accessor["counter"]]
timestamps = values["timestamp"]
timestamps = [x - timestamps[0] for x in timestamps]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
trend = []
total = 0
data = []
for node, vals in nodeStats.iteritems():
avg = sum(vals) / samplesCount
trend.append((node, util.size_label(avg)))
data.append(avg)
#print data
trend.append(("variance", util.two_pass_variance(data)))
result[bucket] = trend
return result
class ItemGrowth:
def run(self, accessor):
result = {}
start_cluster = 0
end_cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
trend = []
values = stats_info[accessor["scale"]][accessor["counter"]]
timestamps = values["timestamp"]
timestamps = [x - timestamps[0] for x in timestamps]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
for node, vals in nodeStats.iteritems():
a, b = util.linreg(timestamps, vals)
if b < 1:
trend.append((node, 0))
else:
start_val = b
start_cluster += b
end_val = a * timestamps[-1] + b
end_cluster += end_val
rate = (end_val * 1.0 / b - 1.0) * 100
trend.append((node, util.pretty_float(rate) + "%"))
result[bucket] = trend
if len(stats_buffer.buckets) > 0:
rate = (end_cluster * 1.0 / start_cluster - 1.0) * 100
result["cluster"] = util.pretty_float(rate) + "%"
return result
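# Illustrative note (hypothetical numbers): if util.linreg returns slope a=2 items/s and
# intercept b=1000 for a window whose last relative timestamp is 3600s, then
# end_val = 2*3600 + 1000 = 8200 and the reported growth rate is
# (8200/1000 - 1.0) * 100 = 720.0%.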
class NumVbuckt:
def run(self, accessor):
result = {}
for bucket, stats_info in stats_buffer.buckets.iteritems():
num_error = []
values = stats_info[accessor["scale"]][accessor["counter"]]
nodeStats = values["nodeStats"]
for node, vals in nodeStats.iteritems():
if vals[-1] < accessor["threshold"]:
num_error.append({"node":node, "value": int(vals[-1])})
if len(num_error) > 0:
result[bucket] = {"error" : num_error}
return result
class RebalanceStuck:
def run(self, accessor):
result = {}
for bucket, bucket_stats in stats_buffer.node_stats.iteritems():
num_error = []
for node, stats_info in bucket_stats.iteritems():
for key, value in stats_info.iteritems():
if key.find(accessor["counter"]) >= 0:
if accessor.has_key("threshold"):
if int(value) > accessor["threshold"]:
num_error.append({"node":node, "value": (key, value)})
else:
num_error.append({"node":node, "value": (key, value)})
if len(num_error) > 0:
result[bucket] = {"error" : num_error}
return result
class MemoryFramentation:
def run(self, accessor):
result = {}
for bucket, bucket_stats in stats_buffer.node_stats.iteritems():
num_error = []
for node, stats_info in bucket_stats.iteritems():
for key, value in stats_info.iteritems():
if key.find(accessor["counter"]) >= 0:
if accessor.has_key("threshold"):
if int(value) > accessor["threshold"]:
if accessor.has_key("unit"):
if accessor["unit"] == "time":
num_error.append({"node":node, "value": (key, util.time_label(value))})
elif accessor["unit"] == "size":
num_error.append({"node":node, "value": (key, util.size_label(value))})
else:
num_error.append({"node":node, "value": (key, value)})
else:
num_error.append({"node":node, "value": (key, value)})
if len(num_error) > 0:
result[bucket] = {"error" : num_error}
return result
class EPEnginePerformance:
def run(self, accessor):
result = {}
for bucket, bucket_stats in stats_buffer.node_stats.iteritems():
num_error = []
for node, stats_info in bucket_stats.iteritems():
for key, value in stats_info.iteritems():
if key.find(accessor["counter"]) >= 0:
if accessor.has_key("threshold"):
if accessor["counter"] == "flusherState" and value != accessor["threshold"]:
num_error.append({"node":node, "value": (key, value)})
elif accessor["counter"] == "flusherCompleted" and value == accessor["threshold"]:
num_error.append({"node":node, "value": (key, value)})
else:
if value > accessor["threshold"]:
num_error.append({"node":node, "value": (key, value)})
if len(num_error) > 0:
result[bucket] = {"error" : num_error}
return result
class TotalDataSize:
def run(self, accessor):
result = []
total = 0
for node, nodeinfo in stats_buffer.nodes.iteritems():
if nodeinfo["StorageInfo"].has_key("hdd"):
total += nodeinfo['StorageInfo']['hdd']['usedByData']
result.append(util.size_label(total))
return result
class AvailableDiskSpace:
def run(self, accessor):
result = []
total = 0
for node, nodeinfo in stats_buffer.nodes.iteritems():
if nodeinfo["StorageInfo"].has_key("hdd"):
total += nodeinfo['StorageInfo']['hdd']['free']
result.append(util.size_label(total))
return result
ClusterCapsule = [
{"name" : "TotalDataSize",
"ingredients" : [
{
"name" : "totalDataSize",
"description" : "Total Data Size across cluster",
"code" : "TotalDataSize",
}
],
"clusterwise" : True,
"perNode" : False,
"perBucket" : False,
},
{"name" : "AvailableDiskSpace",
"ingredients" : [
{
"name" : "availableDiskSpace",
"description" : "Available disk space",
"code" : "AvailableDiskSpace",
}
],
"clusterwise" : True,
"perNode" : False,
"perBucket" : False,
},
{"name" : "CacheMissRatio",
"ingredients" : [
{
"name" : "cacheMissRatio",
"description" : "Cache miss ratio",
"counter" : "ep_cache_miss_rate",
"scale" : "hour",
"code" : "CacheMissRatio",
"threshold" : 2,
},
],
"clusterwise" : True,
"perNode" : True,
"perBucket" : True,
"indicator" : {
"cause" : "blah",
"impact" : "blah",
"action" : "blah",
},
"nodeDisparate" : True,
},
{"name" : "DGM",
"ingredients" : [
{
"name" : "dgm",
"description" : "Disk to Memory Ratio",
"code" : "DGMRatio"
},
],
"clusterwise" : True,
"perNode" : False,
"perBucket" : False,
},
{"name" : "ActiveReplicaResidentRatio",
"ingredients" : [
{
"name" : "activeReplicaResidencyRatio",
"description" : "Active and Replica Resident Ratio",
"counter" : ["curr_items", "vb_replica_curr_items"],
"scale" : "minute",
"code" : "ARRatio",
"threshold" : 1,
},
],
"clusterwise" : True,
"perNode" : True,
"perBucket" : True,
"indicator" : {
"cause" : "blah",
"impact" : "blah",
"action" : "blah",
},
},
{"name" : "OPSPerformance",
"ingredients" : [
{
"name" : "opsPerformance",
"description" : "Read/Write/Delete ops ratio",
"scale" : "minute",
"counter" : ["cmd_get", "cmd_set", "delete_hits"],
"code" : "OpsRatio",
},
],
"perBucket" : True,
},
{"name" : "GrowthRate",
"ingredients" : [
{
"name" : "dataGrowthRateForItems",
"description" : "Data Growth rate for items",
"counter" : "curr_items",
"scale" : "day",
"code" : "ItemGrowth",
"unit" : "percentage",
},
],
"clusterwise" : True,
},
{"name" : "VBucketNumber",
"ingredients" : [
{
"name" : "activeVbucketNumber",
"description" : "Active VBucket number is less than expected",
"counter" : "vb_active_num",
"scale" : "hour",
"code" : "NumVbuckt",
"threshold" : 1024,
},
{
"name" : "replicaVBucketNumber",
"description" : "Replica VBucket number is less than expected",
"counter" : "vb_replica_num",
"scale" : "hour",
"code" : "NumVbuckt",
"threshold" : 1024,
},
],
"indicator" : {
"cause" : "blah",
"impact" : "blah",
"action" : "blah",
},
},
{"name" : "MemoryUsage",
"ingredients" : [
{
"name" : "memoryUsage",
"description" : "Check memory usage",
"counter" : "mem_used",
"scale" : "hour",
"code" : "MemUsed",
},
],
"nodeDisparate" : True,
},
{"name" : "RebalancePerformance",
"ingredients" : [
{
"name" : "rebalanceStuck",
"description" : "Check if rebalance is stuck",
"counter" : "idle",
"code" : "RebalanceStuck",
},
{
"name" : "highBackfillRemaing",
"description" : "Tap queue backfilll remaining is too high",
"counter" : "ep_tap_queue_backfillremaining",
"code" : "RebalanceStuck",
"threshold" : 1000,
},
],
"indicator" : {
"cause" : "blah",
"impact" : "blah",
"action" : "blah",
}
},
{"name" : "MemoryFragmentation",
"ingredients" : [
{
"name" : "totalFragmentation",
"description" : "Total memory fragmentation",
"counter" : "total_fragmentation_bytes",
"code" : "MemoryFramentation",
"unit" : "size",
"threshold" : 1073741824, # 1GB
},
{
"name" : "diskDelete",
"description" : "Averge disk delete time",
"counter" : "disk_del",
"code" : "MemoryFramentation",
"unit" : "time",
"threshold" : 1000 #1ms
},
{
"name" : "diskUpdate",
"description" : "Averge disk update time",
"counter" : "disk_update",
"code" : "MemoryFramentation",
"unit" : "time",
"threshold" : 1000 #1ms
},
{
"name" : "diskInsert",
"description" : "Averge disk insert time",
"type" : "python",
"counter" : "disk_insert",
"code" : "MemoryFramentation",
"unit" : "time",
"threshold" : 1000 #1ms
},
{
"name" : "diskCommit",
"description" : "Averge disk commit time",
"counter" : "disk_commit",
"code" : "MemoryFramentation",
"unit" : "time",
"threshold" : 5000000 #10s
},
],
"indicator" : {
"cause" : "blah",
"impact" : "blah",
"action" : "blah",
},
},
{"name" : "EPEnginePerformance",
"ingredients" : [
{
"name" : "flusherState",
"description" : "Engine flusher state",
"counter" : "ep_flusher_state",
"code" : "EPEnginePerformance",
"threshold" : "running",
},
{
"name" : "flusherCompleted",
"description" : "Flusher completed",
"counter" : "ep_flusher_num_completed",
"code" : "EPEnginePerformance",
"threshold" : 0
},
{
"name" : "avgItemLoadTime",
"description" : "Average item loaded time",
"counter" : "ep_bg_load_avg",
"code" : "EPEnginePerformance",
"threshold" : 100,
},
{
"name" : "avgItemWaitTime",
"description" : "Averge item waited time",
"counter" : "ep_bg_wait_avg",
"code" : "EPEnginePerformance",
"threshold" : 100
},
],
"indicator" : {
"cause" : "blah",
"impact" : "blah",
"action" : "blah",
},
},
]
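# Illustrative sketch (assumption, not part of this module): one plausible way a report
# generator could dispatch a capsule ingredient to its handler class via the "code"
# field. The globals() lookup and the run_ingredient name are hypothetical.
#
#   def run_ingredient(ingredient):
#       handler_cls = globals()[ingredient["code"]]
#       return handler_cls().run(ingredient)
#
#   # e.g. run_ingredient(ClusterCapsule[0]["ingredients"][0]) would invoke
#   # TotalDataSize().run({...}) and return a list like ['12.3GB'].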
| bcui6611/healthchecker | cluster_stats.py | Python | apache-2.0 | 19,273 | 0.01349 |
"""`main` is the top level module for your Flask application."""
# Import the Flask Framework
import os
import json
from flask import Flask, request, send_from_directory, render_template
app = Flask(__name__, static_url_path='')
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
return 'Hello World!'
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
@app.route("/spk/json/<path:path>", methods=['POST', 'GET'])
def send_js(path):
file, ext = os.path.splitext(path)
if ext == "":
ext = ".json"
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
json_url = os.path.join(SITE_ROOT, "static", "json", file + ext)
    with open(json_url) as f:
        s = f.read()
    return s
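# Illustrative note (hypothetical paths): with the route above, GET /spk/json/config
# returns the contents of static/json/config.json, while GET /spk/json/data.geojson
# keeps the explicit extension and returns static/json/data.geojson.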
if __name__ == '__main__':
app.run()
| i5on9i/echoserver | main.py | Python | apache-2.0 | 1,161 | 0 |
"""This component provides select entities for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
import logging
from typing import Any, Final
from pyunifiprotect.data import (
Camera,
DoorbellMessageType,
IRLEDMode,
Light,
LightModeEnableType,
LightModeType,
Liveview,
RecordingMode,
Viewer,
)
from pyunifiprotect.data.devices import LCDMessage
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity import EntityCategory
from homeassistant.util.dt import utcnow
from .const import (
DOMAIN,
SERVICE_SET_DOORBELL_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
TYPE_EMPTY_VALUE,
)
from .data import ProtectData
from .entity import ProtectDeviceEntity, async_all_device_entities
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_IR = "infrared"
_KEY_REC_MODE = "recording_mode"
_KEY_VIEWER = "viewer"
_KEY_LIGHT_MOTION = "light_motion"
_KEY_DOORBELL_TEXT = "doorbell_text"
_KEY_PAIRED_CAMERA = "paired_camera"
INFRARED_MODES = [
{"id": IRLEDMode.AUTO.value, "name": "Auto"},
{"id": IRLEDMode.ON.value, "name": "Always Enable"},
{"id": IRLEDMode.AUTO_NO_LED.value, "name": "Auto (Filter Only, no LED's)"},
{"id": IRLEDMode.OFF.value, "name": "Always Disable"},
]
LIGHT_MODE_MOTION = "On Motion - Always"
LIGHT_MODE_MOTION_DARK = "On Motion - When Dark"
LIGHT_MODE_DARK = "When Dark"
LIGHT_MODE_OFF = "Manual"
LIGHT_MODES = [LIGHT_MODE_MOTION, LIGHT_MODE_DARK, LIGHT_MODE_OFF]
LIGHT_MODE_TO_SETTINGS = {
LIGHT_MODE_MOTION: (LightModeType.MOTION.value, LightModeEnableType.ALWAYS.value),
LIGHT_MODE_MOTION_DARK: (
LightModeType.MOTION.value,
LightModeEnableType.DARK.value,
),
LIGHT_MODE_DARK: (LightModeType.WHEN_DARK.value, LightModeEnableType.DARK.value),
LIGHT_MODE_OFF: (LightModeType.MANUAL.value, None),
}
MOTION_MODE_TO_LIGHT_MODE = [
{"id": LightModeType.MOTION.value, "name": LIGHT_MODE_MOTION},
{"id": f"{LightModeType.MOTION.value}Dark", "name": LIGHT_MODE_MOTION_DARK},
{"id": LightModeType.WHEN_DARK.value, "name": LIGHT_MODE_DARK},
{"id": LightModeType.MANUAL.value, "name": LIGHT_MODE_OFF},
]
DEVICE_RECORDING_MODES = [
{"id": mode.value, "name": mode.value.title()} for mode in list(RecordingMode)
]
DEVICE_CLASS_LCD_MESSAGE: Final = "unifiprotect__lcd_message"
@dataclass
class ProtectSelectEntityDescription(ProtectRequiredKeysMixin, SelectEntityDescription):
"""Describes UniFi Protect Select entity."""
ufp_options: list[dict[str, Any]] | None = None
ufp_enum_type: type[Enum] | None = None
ufp_set_function: str | None = None
CAMERA_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_REC_MODE,
name="Recording Mode",
icon="mdi:video-outline",
entity_category=EntityCategory.CONFIG,
ufp_options=DEVICE_RECORDING_MODES,
ufp_enum_type=RecordingMode,
ufp_value="recording_settings.mode",
ufp_set_function="set_recording_mode",
),
ProtectSelectEntityDescription(
key=_KEY_IR,
name="Infrared Mode",
icon="mdi:circle-opacity",
entity_category=EntityCategory.CONFIG,
ufp_required_field="feature_flags.has_led_ir",
ufp_options=INFRARED_MODES,
ufp_enum_type=IRLEDMode,
ufp_value="isp_settings.ir_led_mode",
ufp_set_function="set_ir_led_model",
),
ProtectSelectEntityDescription(
key=_KEY_DOORBELL_TEXT,
name="Doorbell Text",
icon="mdi:card-text",
entity_category=EntityCategory.CONFIG,
device_class=DEVICE_CLASS_LCD_MESSAGE,
ufp_required_field="feature_flags.has_lcd_screen",
ufp_value="lcd_message",
),
)
LIGHT_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_LIGHT_MOTION,
name="Light Mode",
icon="mdi:spotlight",
entity_category=EntityCategory.CONFIG,
ufp_options=MOTION_MODE_TO_LIGHT_MODE,
ufp_value="light_mode_settings.mode",
),
ProtectSelectEntityDescription(
key=_KEY_PAIRED_CAMERA,
name="Paired Camera",
icon="mdi:cctv",
entity_category=EntityCategory.CONFIG,
ufp_value="camera_id",
),
)
VIEWER_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_VIEWER,
name="Liveview",
icon="mdi:view-dashboard",
entity_category=None,
ufp_value="liveview",
ufp_set_function="set_liveview",
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: entity_platform.AddEntitiesCallback,
) -> None:
"""Set up number entities for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectSelects,
camera_descs=CAMERA_SELECTS,
light_descs=LIGHT_SELECTS,
viewer_descs=VIEWER_SELECTS,
)
async_add_entities(entities)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_DOORBELL_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
"async_set_doorbell_message",
)
class ProtectSelects(ProtectDeviceEntity, SelectEntity):
"""A UniFi Protect Select Entity."""
def __init__(
self,
data: ProtectData,
device: Camera | Light | Viewer,
description: ProtectSelectEntityDescription,
) -> None:
"""Initialize the unifi protect select entity."""
assert description.ufp_value is not None
self.device: Camera | Light | Viewer = device
self.entity_description: ProtectSelectEntityDescription = description
super().__init__(data)
self._attr_name = f"{self.device.name} {self.entity_description.name}"
options = description.ufp_options
if options is not None:
self._attr_options = [item["name"] for item in options]
self._hass_to_unifi_options: dict[str, Any] = {
item["name"]: item["id"] for item in options
}
self._unifi_to_hass_options: dict[Any, str] = {
item["id"]: item["name"] for item in options
}
self._async_set_dynamic_options()
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
# entities with categories are not exposed for voice and safe to update dynamically
if self.entity_description.entity_category is not None:
_LOGGER.debug(
"Updating dynamic select options for %s", self.entity_description.name
)
self._async_set_dynamic_options()
@callback
def _async_set_dynamic_options(self) -> None:
"""Options that do not actually update dynamically.
        This is due to possible downstream platform dependencies on these options.
"""
if self.entity_description.ufp_options is not None:
return
if self.entity_description.key == _KEY_VIEWER:
options = [
{"id": item.id, "name": item.name}
for item in self.data.api.bootstrap.liveviews.values()
]
elif self.entity_description.key == _KEY_DOORBELL_TEXT:
default_message = (
self.data.api.bootstrap.nvr.doorbell_settings.default_message_text
)
messages = self.data.api.bootstrap.nvr.doorbell_settings.all_messages
built_messages = (
{"id": item.type.value, "name": item.text} for item in messages
)
options = [
{"id": "", "name": f"Default Message ({default_message})"},
*built_messages,
]
elif self.entity_description.key == _KEY_PAIRED_CAMERA:
options = [{"id": TYPE_EMPTY_VALUE, "name": "Not Paired"}]
for camera in self.data.api.bootstrap.cameras.values():
options.append({"id": camera.id, "name": camera.name})
self._attr_options = [item["name"] for item in options]
self._hass_to_unifi_options = {item["name"]: item["id"] for item in options}
self._unifi_to_hass_options = {item["id"]: item["name"] for item in options}
@property
def current_option(self) -> str:
"""Return the current selected option."""
assert self.entity_description.ufp_value is not None
unifi_value = get_nested_attr(self.device, self.entity_description.ufp_value)
if unifi_value is None:
unifi_value = TYPE_EMPTY_VALUE
elif isinstance(unifi_value, Liveview):
unifi_value = unifi_value.id
elif self.entity_description.key == _KEY_LIGHT_MOTION:
assert isinstance(self.device, Light)
# a bit of extra to allow On Motion Always/Dark
if (
self.device.light_mode_settings.mode == LightModeType.MOTION
and self.device.light_mode_settings.enable_at
== LightModeEnableType.DARK
):
unifi_value = f"{LightModeType.MOTION.value}Dark"
elif self.entity_description.key == _KEY_DOORBELL_TEXT:
assert isinstance(unifi_value, LCDMessage)
return unifi_value.text
return self._unifi_to_hass_options.get(unifi_value, unifi_value)
async def async_select_option(self, option: str) -> None:
"""Change the Select Entity Option."""
if isinstance(self.device, Light):
if self.entity_description.key == _KEY_LIGHT_MOTION:
lightmode, timing = LIGHT_MODE_TO_SETTINGS[option]
_LOGGER.debug("Changing Light Mode to %s", option)
await self.device.set_light_settings(
LightModeType(lightmode),
enable_at=None if timing is None else LightModeEnableType(timing),
)
return
unifi_value = self._hass_to_unifi_options[option]
if self.entity_description.key == _KEY_PAIRED_CAMERA:
if unifi_value == TYPE_EMPTY_VALUE:
unifi_value = None
camera = self.data.api.bootstrap.cameras.get(unifi_value)
await self.device.set_paired_camera(camera)
_LOGGER.debug("Changed Paired Camera to to: %s", option)
return
unifi_value = self._hass_to_unifi_options[option]
if isinstance(self.device, Camera):
if self.entity_description.key == _KEY_DOORBELL_TEXT:
if unifi_value.startswith(DoorbellMessageType.CUSTOM_MESSAGE.value):
await self.device.set_lcd_text(
DoorbellMessageType.CUSTOM_MESSAGE, text=option
)
elif unifi_value == TYPE_EMPTY_VALUE:
await self.device.set_lcd_text(None)
else:
await self.device.set_lcd_text(DoorbellMessageType(unifi_value))
_LOGGER.debug("Changed Doorbell LCD Text to: %s", option)
return
if self.entity_description.ufp_enum_type is not None:
unifi_value = self.entity_description.ufp_enum_type(unifi_value)
elif self.entity_description.key == _KEY_VIEWER:
unifi_value = self.data.api.bootstrap.liveviews[unifi_value]
_LOGGER.debug("%s set to: %s", self.entity_description.key, option)
assert self.entity_description.ufp_set_function
coro = getattr(self.device, self.entity_description.ufp_set_function)
await coro(unifi_value)
async def async_set_doorbell_message(self, message: str, duration: str) -> None:
"""Set LCD Message on Doorbell display."""
if self.entity_description.key != _KEY_DOORBELL_TEXT:
raise HomeAssistantError("Not a doorbell text select entity")
assert isinstance(self.device, Camera)
reset_at = None
timeout_msg = ""
if duration.isnumeric():
reset_at = utcnow() + timedelta(minutes=int(duration))
timeout_msg = f" with timeout of {duration} minute(s)"
_LOGGER.debug(
'Setting message for %s to "%s"%s', self.device.name, message, timeout_msg
)
await self.device.set_lcd_text(
DoorbellMessageType.CUSTOM_MESSAGE, message, reset_at=reset_at
)
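# Illustrative sketch (assumption): a service call that would reach
# async_set_doorbell_message above. The exact schema lives in
# SET_DOORBELL_LCD_MESSAGE_SCHEMA; the service name and field names below are inferred
# from the constants and the method signature, and the entity id is hypothetical.
#
#   service: unifiprotect.set_doorbell_message
#   target:
#     entity_id: select.front_doorbell_doorbell_text
#   data:
#     message: "Back in 5 minutes"
#     duration: "15"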
| mezz64/home-assistant | homeassistant/components/unifiprotect/select.py | Python | apache-2.0 | 13,007 | 0.001538 |
from django.db.backends.postgresql.creation import * # NOQA
| ringly/django-postgres-dbdefaults | postgresql_dbdefaults/creation.py | Python | mit | 61 | 0 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'My Project',
'author': 'Wouter Oosterveld',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'wouter@fizzyflux.nl',
'version': '0.1',
'install_requires': ['nose','what','boto'],
'packages': ['snaps'],
'scripts': ['scripts/snaps'],
'name': 'snaps'
}
setup(**config)
| wouteroostervld/snaps | setup.py | Python | mit | 465 | 0.004301 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from hashlib import sha1
from flask import render_template
from indico.modules.events.agreements.models.agreements import Agreement
from indico.modules.events.settings import EventSettingsProxy
from indico.util.caching import make_hashable, memoize_request
from indico.util.decorators import cached_classproperty, classproperty
from indico.util.i18n import _
from indico.util.string import return_ascii
from indico.web.flask.templating import get_overridable_template_name, get_template_module
class AgreementPersonInfo(object):
def __init__(self, name=None, email=None, user=None, data=None):
if user:
if not name:
name = user.full_name
if not email:
email = user.email
if not name:
raise ValueError('name is missing')
self.name = name
# Note: If you have persons with no email, you *MUST* have data that uniquely identifies such persons
self.email = email or None
self.user = user
self.data = data
@return_ascii
def __repr__(self):
return '<AgreementPersonInfo({}, {}, {})>'.format(self.name, self.email, self.identifier)
@property
def identifier(self):
data_string = None
if self.data:
data_string = '-'.join('{}={}'.format(k, make_hashable(v)) for k, v in sorted(self.data.viewitems()))
identifier = '{}:{}'.format(self.email, data_string or None)
return sha1(identifier).hexdigest()
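# Illustrative note (hypothetical values): for AgreementPersonInfo(name='Jane',
# email='jane@example.com', data={'contribution': 42}) the identifier above is
# sha1('jane@example.com:contribution=42').hexdigest(); with no data it hashes
# 'jane@example.com:None'.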
class AgreementDefinitionBase(object):
"""Base class for agreement definitions"""
#: unique name of the agreement definition
name = None
#: readable name of the agreement definition
title = None
#: optional and short description of the agreement definition
description = None
#: url to obtain the paper version of the agreement form
paper_form_url = None
#: template of the agreement form - agreement definition name by default
form_template_name = None
#: template of the email body - emails/agreement_default_body.html by default
email_body_template_name = None
#: plugin containing this agreement definition - assigned automatically
plugin = None
#: default settings for an event
default_event_settings = {'manager_notifications_enabled': True}
#: default message to display when the agreement definition type is disabled
disabled_reason = _('No signatures needed.')
@classproperty
@classmethod
def locator(cls):
return {'definition': cls.name}
@cached_classproperty
@classmethod
def event_settings(cls):
return EventSettingsProxy('agreement_{}'.format(cls.name), cls.default_event_settings)
@classmethod
def can_access_api(cls, user, event):
"""Checks if a user can list the agreements for an event"""
return event.can_manage(user)
@classmethod
def extend_api_data(cls, event, person, agreement, data): # pragma: no cover
"""Extends the data returned in the HTTP API
:param event: the event
:param person: the :class:`AgreementPersonInfo`
:param agreement: the :class:`Agreement` if available
:param data: a dict containing the default data for the agreement
"""
pass
@classmethod
def get_email_body_template(cls, event, **kwargs):
"""Returns the template of the email body for this agreement definition"""
template_name = cls.email_body_template_name or 'emails/agreement_default_body.html'
template_path = get_overridable_template_name(template_name, cls.plugin, 'events/agreements/')
return get_template_module(template_path, event=event)
@classmethod
@memoize_request
def get_people(cls, event):
"""Returns a dictionary of :class:`AgreementPersonInfo` required to sign agreements"""
people = cls.iter_people(event)
if people is None:
return {}
return {p.identifier: p for p in people}
@classmethod
def get_people_not_notified(cls, event):
"""Returns a dictionary of :class:`AgreementPersonInfo` yet to be notified"""
people = cls.get_people(event)
sent_agreements = {a.identifier for a in event.agreements.filter_by(type=cls.name)}
return {k: v for k, v in people.items() if v.identifier not in sent_agreements}
@classmethod
def get_stats_for_signed_agreements(cls, event):
"""Returns a digest of signed agreements on an event
:param event: the event
:return: (everybody_signed, num_accepted, num_rejected)
"""
people = cls.get_people(event)
identifiers = [p.identifier for p in people.itervalues()]
query = event.agreements.filter(Agreement.type == cls.name, Agreement.identifier.in_(identifiers))
num_accepted = query.filter(Agreement.accepted).count()
num_rejected = query.filter(Agreement.rejected).count()
everybody_signed = len(people) == (num_accepted + num_rejected)
return everybody_signed, num_accepted, num_rejected
@classmethod
def is_active(cls, event):
"""Checks if the agreement type is active for a given event"""
return bool(cls.get_people(event))
@classmethod
def is_agreement_orphan(cls, event, agreement):
"""Checks if the agreement no longer has a corresponding person info record"""
return agreement.identifier not in cls.get_people(event)
@classmethod
def render_form(cls, agreement, form, **kwargs):
template_name = cls.form_template_name or '{}.html'.format(cls.name.replace('-', '_'))
template_path = get_overridable_template_name(template_name, cls.plugin, 'events/agreements/')
return render_template(template_path, agreement=agreement, form=form, **kwargs)
@classmethod
def render_data(cls, event, data): # pragma: no cover
"""Returns extra data to display in the agreement list
If you want a column to be rendered as HTML, use a :class:`~markupsafe.Markup`
object instead of a plain string.
:param event: The event containing the agreements
:param data: The data from the :class:`AgreementPersonInfo`
:return: List of extra columns for a row
"""
return None
@classmethod
def handle_accepted(cls, agreement): # pragma: no cover
"""Handles logic on agreement accepted"""
pass
@classmethod
def handle_rejected(cls, agreement): # pragma: no cover
"""Handles logic on agreement rejected"""
pass
@classmethod
def handle_reset(cls, agreement): # pragma: no cover
"""Handles logic on agreement reset"""
pass
@classmethod
def iter_people(cls, event): # pragma: no cover
"""Yields :class:`AgreementPersonInfo` required to sign agreements"""
raise NotImplementedError
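# Illustrative sketch (assumption, not shipped with Indico): the smallest useful
# subclass a plugin could provide. The DemoAgreement name and the event.person_links
# attribute are hypothetical; a real definition would also be exposed to the agreements
# module through the plugin's signal handlers.
#
#   class DemoAgreement(AgreementDefinitionBase):
#       name = 'demo-agreement'
#       title = 'Demo agreement'
#
#       @classmethod
#       def iter_people(cls, event):
#           for link in getattr(event, 'person_links', []):
#               yield AgreementPersonInfo(name=link.full_name, email=link.email)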
| nop33/indico | indico/modules/events/agreements/base.py | Python | gpl-3.0 | 7,667 | 0.002739 |
# -*- coding: utf-8 -*-
import os
import time
import logging
import string
import requests
import unicodedata
import base64
try: import cPickle as pickle
except: import pickle
import datetime
from django.utils import timezone
import json
from pprint import pprint
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.http import HttpResponseForbidden
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
logger = logging.getLogger(__name__)
import boto.ec2
import boto.ec2.cloudwatch
from django.contrib.auth.models import User
from userprofile.models import Profile as userprofile
from userprofile.views import _log_user_activity
from amazon import s3_funcs
from amazon import s3_funcs_shortcuts
from django.contrib.auth.decorators import login_required
from django.template.defaultfilters import filesizeformat, upper
from django.contrib.humanize.templatetags.humanize import naturalday
from cloudly.templatetags.cloud_extras import clean_ps_command
from operator import itemgetter, attrgetter, methodcaller
from cloudly.templatetags.cloud_extras import clear_filename, get_file_extension
from vms.models import Cache
import decimal
from django.db.models.base import ModelState
import pymongo
from pymongo import MongoClient
from pymongo import ASCENDING, DESCENDING
client = MongoClient('mongo', 27017)
mongo = client.cloudly
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
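# Illustrative note (hypothetical value): date_handler(datetime.datetime(2015, 1, 2, 3, 4, 5))
# returns '2015-01-02T03:04:05'; it is used as the default= hook for json.dumps below so
# datetime objects serialize cleanly.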
@login_required()
def update_session(request):
for value in request.POST:
if(value != 'secret'):
request.session[value] = request.POST[value]
request.session.modified = True
return render_to_response('ajax_null.html', locals())
@login_required()
def aws_vm_view(request,vm_name):
print '-- aws_vm_view'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
user.last_login = datetime.datetime.now()
user.save()
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/aws/"+vm_name,"aws_vm_view",ip=ip)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
try:
vm_cache = pickle.loads(vm_cache)[vm_name]
except:
return HttpResponse("XXX " + vm_name)
ec2_region = vm_cache['instance']['region']['name']
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
if(vms_cache.vms_console_output_cache):
console_output = vms_cache.vms_console_output_cache
else:
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
reservations = ec2conn.get_all_instances(instance_ids=[vm_name,])
instance = reservations[0].instances[0]
console_output = instance.get_console_output()
console_output = console_output.output
if(not console_output):
console_output = ""
vms_cache.vms_console_output_cache = console_output
vms_cache.save()
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=60)
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="NetworkIn")[0]
networkin_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="NetworkOut")[0]
networkout_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskReadOps")[0]
disk_readops_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskWriteOps")[0]
disk_writeops_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskReadBytes")[0]
disk_readbytes_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskWriteBytes")[0]
disk_writebytes_datapoints = metric.query(start, end, 'Average', '')
networkin_datapoints = json.dumps(networkin_datapoints,default=date_handler)
networkout_datapoints = json.dumps(networkout_datapoints,default=date_handler)
disk_readops_datapoints = json.dumps(disk_readops_datapoints,default=date_handler)
disk_writeops_datapoints = json.dumps(disk_writeops_datapoints,default=date_handler)
disk_readbytes_datapoints = json.dumps(disk_readbytes_datapoints,default=date_handler)
disk_writebytes_datapoints = json.dumps(disk_writebytes_datapoints,default=date_handler)
return render_to_response('aws_vm.html', {'vm_name':vm_name,'vm_cache':vm_cache,'console_output':console_output,'networkin_datapoints':networkin_datapoints,'networkout_datapoints':networkout_datapoints,'disk_readops_datapoints':disk_readops_datapoints,'disk_writeops_datapoints':disk_writeops_datapoints,'disk_readbytes_datapoints':disk_readbytes_datapoints,'disk_writebytes_datapoints':disk_writebytes_datapoints,}, context_instance=RequestContext(request))
@login_required()
def control_aws_vm(request, vm_name, action):
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
user.last_login = datetime.datetime.now()
user.save()
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/aws/"+vm_name+"/"+action+"/","control_aws_vm",ip=ip)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
vm_cache = pickle.loads(vm_cache)[vm_name]
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2_region = vm_cache['instance']['region']['name']
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
if(action=="reboot"):
ec2conn.reboot_instances([vm_name,])
if(action=="start"):
ec2conn.start_instances([vm_name,])
if(action=="stop"):
ec2conn.stop_instances([vm_name,])
if(action=="terminate"):
ec2conn.terminate_instances([vm_name,])
return HttpResponseRedirect("/")
@login_required()
def server_view(request, hwaddr):
print '-- server_view'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/server/"+hwaddr,"server_view",ip=ip)
hwaddr_orig = hwaddr
hwaddr = hwaddr.replace('-',':')
server = mongo.servers.find_one({'secret':profile.secret,'uuid':hwaddr,})
server_status = "Running"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
server_status = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
server_status = "Offline"
try:
uuid = server['uuid']
except:
return HttpResponse("access denied")
disks_usage_ = []
#disks_usage = mongo.disks_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in disks_usage: disks_usage_.append(i)
disks_usage = disks_usage_
networking_ = []
#networking = mongo.networking.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in networking: networking_.append(i)
networking = networking_
mem_usage_ = []
#mem_usage = mongo.memory_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in mem_usage: mem_usage_.append(i)
mem_usage = mem_usage_
loadavg_ = []
#loadavg = mongo.loadavg.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in loadavg: loadavg_.append(i)
loadavg = loadavg_
activity = mongo.activity.find({'uuid':uuid,}).sort('_id',-1).limit(3)
disks = []
disks_ = server[u'disks_usage']
for disk in disks_:
if not disk[5] in disks:
disks.append(disk[5])
return render_to_response('server_detail.html', {'secret':profile.secret,'hwaddr':hwaddr,'hwaddr_orig':hwaddr_orig,'server':server,'server_status':server_status,'disks_usage':disks_usage,'disks':disks,'mem_usage':mem_usage,'loadavg':loadavg,'networking':networking,'activity':activity,}, context_instance=RequestContext(request))
@login_required()
def ajax_update_server_name(request):
response = {}
response["success"] = "true"
response = str(response).replace('u"','"')
response = response.replace("'",'"')
server_ = request.POST['server']
secret = request.POST['secret']
server_ = server_.replace('-', ':')
server = mongo.servers.find_one({'secret':secret,'uuid':server_,})
if request.POST["servername"] == "":
server['name'] = request.POST['server'].replace("-", ":")
else:
server['name'] = request.POST["servername"]
server = mongo.servers.update({'secret':secret, 'uuid':server_}, server)
vms_cache = Cache.objects.get(user=request.user)
vms_cache.delete()
return HttpResponse(response, content_type="application/json")
@login_required()
def ajax_vms_refresh(request):
user = request.user
profile = userprofile.objects.get(user=request.user)
print 'Refreshing', user, 'VMs cache..'
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
virtual_machines = {}
servers = mongo.servers.find({'secret':profile.secret,}).sort('_id',-1)
vms_cache = Cache.objects.get_or_create(user=user)
vms_cache = vms_cache[0]
vms_cache.is_updating = True
vms_cache.save()
if(servers.count()):
print 'servers count', servers.count()
for server in servers:
instance_metrics = {}
instance_metrics['id'] = server['uuid']
instance_metrics['user_id'] = request.user.id
instance_metrics['provider'] = 'agent'
instance_metrics['instance'] = {}
instance_metrics['instance']['user_id'] = request.user.id
instance_metrics['instance']['state'] = {}
instance_metrics['instance']['tags'] = {}
try:
instance_metrics["instance"]['tags']['Name'] = server['name']
#instance_metrics["instance"]['tags']['Name'] = ''.join(x for x in unicodedata.normalize('NFKD', server['hostname']) if x in string.ascii_letters).lower()
except:
instance_metrics["instance"]['tags']['Name'] = server['hostname'].replace('.','-').lower()
uuid = server['uuid']
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
instance_metrics['instance']['state']['state'] = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
instance_metrics['instance']['state']['state'] = "Offline"
else:
instance_metrics['instance']['state']['state'] = "Running"
cpu_usage_ = ""
params = {'start':'2m-ago','m':'sum:' + uuid.replace(':','-') + '.sys.cpu'}
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
try:
tsdb_response = tsdb_response[0]['dps']
except:
tsdb_response = []
c=0
for i in tsdb_response:
cpu_usage_ += str(round(tsdb_response[i],2))
cpu_usage_ += ","
if(c==60): break
c+=1
cpu_usage = cpu_usage_[:-1]
cpu_usage_reversed = ""
cpu_usage_array_reversed = []
for i in cpu_usage.split(','): cpu_usage_array_reversed.insert(0,i)
for i in cpu_usage_array_reversed: cpu_usage_reversed += str(i)+","
cpu_usage_reversed = cpu_usage_reversed[:-1]
instance_metrics['cpu_utilization_datapoints'] = cpu_usage_reversed
virtual_machines[server['uuid'].replace(':','-')] = instance_metrics
#print 'virtual_machines', virtual_machines
if aws_ec2_verified:
aws_regions = profile.aws_enabled_regions.split(',')
print 'AWS regions', aws_regions
for ec2_region in aws_regions:
if(ec2_region):
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
try:
reservations = ec2conn.get_all_instances()
except:
vms_cache.is_updating = False
vms_cache.vms_response = ""
vms_cache.save()
print vms_cache.is_updating
print vms_cache.vms_response
#return HttpResponse("access denied")
instances = [i for r in reservations for i in r.instances]
for instance in instances:
if not instance: continue
instance_metrics = {}
instance_metrics['instance'] = {}
print '** instance', instance.id, instance.private_ip_address
volumes = []
for volume in ec2conn.get_all_volumes(filters={'attachment.instance-id': instance.id}):
volumes.append([volume.id, volume.iops, volume.size,])
groups = []
for group in instance.__dict__['groups']:
groups.append([group.id, group.name,])
instance_metrics['id'] = instance.id
instance_metrics['user_id'] = request.user.id
instance_metrics['provider'] = "aws-ec2"
instance_metrics['instance']['placement'] = instance.placement
instance_metrics['instance']['user_id'] = request.user.id
instance_metrics['instance']['groups'] = groups
instance_metrics['instance']['block_device_mapping'] = volumes
instance_metrics['instance']['architecture'] = instance.architecture
instance_metrics['instance']['client_token'] = instance.client_token
instance_metrics['instance']['dns_name'] = instance.dns_name
instance_metrics['instance']['private_ip_address'] = instance.private_ip_address
instance_metrics['instance']['hypervisor'] = instance.hypervisor
instance_metrics['instance']['id'] = instance.id
instance_metrics['instance']['image_id'] = instance.image_id
instance_metrics['instance']['instance_type'] = instance.instance_type
instance_metrics['instance']['ip_address'] = instance.ip_address
instance_metrics['instance']['key_name'] = instance.key_name
instance_metrics['instance']['launch_time'] = instance.launch_time
instance_metrics['instance']['monitored'] = instance.monitored
instance_metrics['instance']['persistent'] = instance.persistent
instance_metrics['instance']['ramdisk'] = instance.ramdisk
instance_metrics['instance']['root_device_name'] = instance.root_device_name
instance_metrics['instance']['root_device_type'] = instance.root_device_type
instance_metrics['instance']['tags'] = instance.tags
instance_metrics['instance']['virtualization_type'] = instance.virtualization_type
instance_metrics['instance']['vpc_id'] = instance.vpc_id
instance_metrics['instance']['region'] = {"endpoint":instance.region.endpoint,"name":instance.region.name,}
instance_metrics['instance']['state'] = {"state":instance.state,"code":instance.state_code,"state_reason":instance.state_reason,}
virtual_machines[instance.id] = instance_metrics
print 'Updating', request.user, 'cache..'
print instance.platform, instance.product_codes
try:
ec2conn.monitor_instance(str(instance.id))
except:
print instance.id, 'instance not in a monitorable state!!'.upper()
#pprint(instance_metrics)
continue
# Here is where you define start - end for the Logs...............
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=60)
# This is how you list all possible values on the response....
# print ec2conn.list_metrics()
try:
metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="CPUUtilization")[0]
except: continue
cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent')
instance_metrics['cpu_utilization_datapoints'] = json.dumps(cpu_utilization_datapoints,default=date_handler)
virtual_machines[instance.id] = instance_metrics
vms_cache.vms_response = base64.b64encode(pickle.dumps(virtual_machines, pickle.HIGHEST_PROTOCOL))
vms_cache.last_seen = timezone.now()
vms_cache.is_updating = False
vms_cache.save()
print 'VMs cache was succesfully updated.'
return HttpResponse("ALLDONE")
@login_required()
def ajax_virtual_machines(request):
print '-- ajax virtual machines'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
try:
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
except: vm_cache = {}
try:
vm_cache = pickle.loads(vm_cache)
except: vm_cache = {}
c=0
ajax_vms_response = "{"
for vm in vm_cache:
if(vm_cache[vm]["instance"]["state"]["state"].lower()!="terminated"):
data_median = 0
isotope_filter_classes = " offline linux "
try:
data = ""
cpu_utilization_datapoints = vm_cache[vm]["cpu_utilization_datapoints"]
cpu_utilization_datapoints = json.loads(cpu_utilization_datapoints)
z=0
for i in cpu_utilization_datapoints:
data += str(i["Average"])
try:
data_median += float(i["Average"])
except: pass
if(len(cpu_utilization_datapoints)-1>z):
data += ","
#print data
z+=1
try:
data_median = data_median/z
except: data_median = 0
except:
try:
data = vm_cache[vm]["cpu_utilization_datapoints"]
z = 0
data_median = 0
for i in data.split(','):
z+=1
data_median += float(i)
data_median = data_median/z
except: data = ""
try:
instance_name = vm_cache[vm]["instance"]["tags"]["Name"]
except:
instance_name = vm
print 'instance_name', instance_name
color = "silver "
vm_state = vm_cache[vm]["instance"]["state"]["state"].title()
server_mac_address = vm_cache[vm]['id']
server_mac_address = str(server_mac_address).replace(':','-')
if(vm_state=="Running"):
isotope_filter_classes = " linux "
if(data_median<17):
color = "lightBlue "
if(data_median>=17 and data_median<=35):
color = "green "
isotope_filter_classes += " busy"
if(data_median>35 and data_median<=50):
color = "darkGreen "
isotope_filter_classes += " busy"
if(data_median>50 and data_median<=70):
color = "lightOrange "
isotope_filter_classes += " busy"
if(data_median>70):
isotope_filter_classes += " busy critical"
color = "red "
if data_median>85:
vm_state = "Hot hot hot!"
if(vm_state=="Stopping"):
color = "pink "
if(vm_state=="Pending"):
color = "pink "
if(vm_state=="Shutting-Down"):
color = "pink "
if(vm_state=="Stopped"):
isotope_filter_classes += " offline"
if(vm_cache[vm]['provider']!='agent'):
isotope_filter_classes += " cloud"
ajax_vms_response += "\""
ajax_vms_response += server_mac_address
ajax_vms_response += "\": {"
ajax_vms_response += "\"vmcolor\":\""
ajax_vms_response += color
ajax_vms_response += "\","
ajax_vms_response += "\"vmname\":\""
ajax_vms_response += instance_name
ajax_vms_response += "\","
ajax_vms_response += "\"vmtitle\":\""
ajax_vms_response += isotope_filter_classes
ajax_vms_response += "\","
ajax_vms_response += "\"averge\":\""
ajax_vms_response += data
ajax_vms_response += "\","
ajax_vms_response += "\"state\":\""
ajax_vms_response += vm_state
ajax_vms_response += "\","
ajax_vms_response += "\"link\":\""
if(vm_cache[vm]['provider']=='agent'):
ajax_vms_response += "/server/"+vm+"/"
else:
ajax_vms_response += "/aws/"+vm+"/"
ajax_vms_response += "\""
ajax_vms_response += "},"
if(c==len(vm_cache)-1):
ajax_vms_response += "}"
c+=1
#print '-_'*80
#print vm_cache[vm]["instance"]["state"]["state"].title(), vm
ajax_vms_response = ajax_vms_response.replace(",}","}")
if(not vm_cache): ajax_vms_response = {}
return render_to_response('ajax_virtual_machines.html', {'user':user,'ajax_vms_response':ajax_vms_response,'vms_cached_response':vm_cache,}, context_instance=RequestContext(request))
@login_required()
def ajax_aws_graphs(request, instance_id, graph_type="all"):
print '-- ajax_aws_graphs', request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
try:
vm_cache = pickle.loads(vm_cache)[instance_id]
except:
return HttpResponse("XXX " + instance_id)
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2_region = vm_cache['instance']['region']['name']
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
reservations = ec2conn.get_all_instances(instance_ids=[instance_id,])
instance = reservations[0].instances[0]
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(days=10)
metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance_id}, metric_name="CPUUtilization")[0]
cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent',period=3600)
return HttpResponse("data " + instance_id + "=" + str(instance) + " ** " + graph_type.upper())
@login_required()
def ajax_server_graphs(request, hwaddr, graph_type=""):
print '-- ajax_server_graphs, type', graph_type
print request.user
graphs_mixed_respose = []
secret = request.POST['secret']
uuid = request.POST['server']
uuid = uuid.replace('-',':')
server = mongo.servers.find_one({'secret':secret,'uuid':uuid,})
print 'debug', secret, uuid
try:
uuid = server['uuid']
except:
return HttpResponse("access denied")
server_status = "Running"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
server_status = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
server_status = "Offline"
#activity = mongo.activity.find({'uuid':uuid,}).sort('_id',-1).limit(3)
if(graph_type=="server_info"):
graphs_mixed_respose = {}
graphs_mixed_respose['name'] = server['name']
graphs_mixed_respose['server_info_hostname'] = server['hostname']
graphs_mixed_respose['cpu_used'] = server['cpu_usage']['cpu_used']
graphs_mixed_respose['memory_used'] = server['memory_usage']['memory_used_percentage']
graphs_mixed_respose['swap_used'] = server['memory_usage']['swap_used_percentage']
graphs_mixed_respose['loadavg_used'] = server['loadavg'][1]
graphs_mixed_respose['server_info_uptime'] = server['uptime']
graphs_mixed_respose['server_info_loadavg'] = server['loadavg']
graphs_mixed_respose['server_info_status'] = server_status
graphs_mixed_respose = str(graphs_mixed_respose).replace('u"','"')
graphs_mixed_respose = graphs_mixed_respose.replace("'",'"')
graphs_mixed_respose = str(graphs_mixed_respose).replace('u"','"')
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="processes"):
processes_ = []
processes = server['processes']
c=0
for line in processes:
if(c>0):
if not line:break
line = line.split(' ')
line_ = []
for i in line:
if i: line_.append(i)
line = line_
process_user = line[0]
process_pid = line[1]
process_cpu = line[2]
process_mem = line[3]
process_vsz = line[4]
process_rss = line[5]
process_tty = line[6]
process_stat = line[7]
process_start_time = line[8]+'-'+line[9]
process_command = line[10:]
process_name = clean_ps_command(process_command[0])
process = {
'pid': process_pid,
'cpu': process_cpu+'%',
'mem': process_mem+'%',
# 'vsz': process_vsz,
# 'rss': process_rss,
# 'tty': process_tty,
# 'stat': process_stat,
# 'start_time': process_start_time,
'process': process_name,
'command': ' '.join(str(x) for x in process_command).replace("[", "").replace("]","")
}
process['user'] = '<span class=\\"label label-success\\">'
if int(float(process_cpu)) > 50:
process['user'] = '<span class=\\"label label-warning\\">'
if int(float(process_cpu)) > 75:
process['user'] = '<span class=\\"label label-danger\\">'
process['user'] += process_user
process['user'] += '</span>'
processes_.append(process)
c+=1
processes = {}
processes['data'] = processes_
processes = str(processes).replace(" u'"," '").replace("[u'","['").replace("'",'"').replace("\\\\", "\\")
return HttpResponse(processes, content_type="application/json")
if(graph_type=="network_connections"):
network_connections_ = []
network_connections = server['network_connections']['listen']
for conn in network_connections:
connection = {}
connection['proto'] = conn[1]
connection['recv-q'] = conn[2]
connection['send-q'] = conn[3]
connection['address'] = conn[4]
if conn[6]:
connection['port'] = conn[5] + "/" + conn[6]
else:
connection['port'] = conn[5]
network_connections_.append(connection)
network_connections = {}
network_connections['data'] = network_connections_
network_connections = str(network_connections).replace(" u'"," '")
network_connections = str(network_connections).replace("'",'"')
return HttpResponse(network_connections, content_type="application/json")
if(graph_type=="active_network_connections"):
active_network_connections_ = []
active_network_connections = server['network_connections']['established']
for conn in active_network_connections:
connection = {}
connection['proto'] = conn[1]
connection['recv-q'] = conn[2]
connection['send-q'] = conn[3]
connection['local-address'] = conn[7]
connection['foreign-address'] = conn[4]
connection['foreign-port'] = conn[5]
active_network_connections_.append(connection)
active_network_connections = {}
active_network_connections['data'] = active_network_connections_
active_network_connections = str(active_network_connections).replace(" u'"," '")
active_network_connections = str(active_network_connections).replace("'",'"')
return HttpResponse(active_network_connections, content_type="application/json")
if(graph_type=="loadavg"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = [[],[],[]]
loadavg_specific_queries = ['1-min','5-mins','15-mins']
count = 0
for i in loadavg_specific_queries:
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.loadavg'}
params_ = params
params_['m'] = params['m'] + "{avg="+i+"}"
tsdb = requests.get('http://hbase:4242/api/query', params=params_)
params = params_
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose[count].append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose[count] = sorted(graphs_mixed_respose[count], key=itemgetter(0))
graphs_mixed_respose[count] = graphs_mixed_respose[count][::-1]
count += 1
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="disks"):
print '*'*1000
print request.POST
mount_ponit = request.POST['mountPoint']
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.disks'}
params['m'] += "{mm=disk_used,mount_point="+mount_ponit+"}"
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="cpu_usage"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.cpu'}
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="mem_usage" or graph_type=="swap_usage"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.memory'}
if(graph_type=="mem_usage"):
params['m'] += "{mm=memory_used}"
if(graph_type=="swap_usage"):
params['m'] += "{mm=swap_used}"
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="network_input_packets" or graph_type=="inbound_traffic" or graph_type=="network_output_packets" or graph_type=="outbound_traffic"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.network'}
if(graph_type=="network_input_packets"):
params['m'] += "{mm=input_accept_packets}"
if(graph_type=="network_input_bytes"):
params['m'] += "{mm=input_accept_bytes}"
if(graph_type=="network_output_packets"):
params['m'] += "{mm=output_accept_packets}"
if(graph_type=="network_output_bytes"):
params['m'] += "{mm=output_accept_bytes}"
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
return HttpResponse("I'm sorry I don't understand")
def ajax_virtual_machines_box(request):
return render_to_response('ajax_virtual_machines_box.html', locals(), context_instance=RequestContext(request))
| followyourheart/cloudly | vms/views.py | Python | mit | 39,971 | 0.014586 |
# Python - 3.6.0
century = lambda year: year // 100 + ((year % 100) > 0)
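# Illustrative checks:
# century(1700) == 17, century(1701) == 18, century(2000) == 20, century(2001) == 21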
| RevansChen/online-judge | Codewars/8kyu/century-from-year/Python/solution1.py | Python | mit | 74 | 0.013514 |
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View, ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView
from content.models import Sub, SubFollow, Post, Commit
from content.forms import SubForm, PostForm, CommitForm
from notify.models import Noty
from core.core import random_avatar_sub
class CreateSubView(CreateView):
template_name = 'content/sub_create.html'
form_class = SubForm
def form_valid(self, form):
obj = form.save(commit=False)
obj.save()
obj.image = 'sub/%s.png' % (obj.slug)
obj.save()
random_avatar_sub(obj.slug)
return HttpResponseRedirect('/sub')
class SubView(ListView):
template_name = 'content/sub.html'
model = Sub
class FrontView(ListView):
template_name = 'layouts/post_list.html'
paginate_by = 4
def get(self, request, *args, **kwargs):
if request.is_ajax(): self.template_name = 'ajax/post_list.html'
return super(FrontView, self).get(request, *args, **kwargs)
def get_queryset(self):
if self.kwargs['tab'] == 'top': return Post.objects.last_commited()
else: return Post.objects.created()
def get_context_data(self, **kwargs):
context = super(FrontView, self).get_context_data(**kwargs)
context['list'] = 'portada'
context['tab_show'] = self.kwargs['tab']
if self.kwargs['tab'] == 'top': context['list_url'] = '/'
else: context['list_url'] = '/new'
return context
class SubPostListView(ListView):
template_name = 'content/sub_post_list.html'
paginate_by = 4
def get(self, request, *args, **kwargs):
if request.is_ajax(): self.template_name = 'ajax/post_list.html'
return super(SubPostListView, self).get(request, *args, **kwargs)
def get_queryset(self):
if self.kwargs['tab'] == 'top': return Post.objects.sub_last_commited(self.kwargs['sub'])
else: return Post.objects.sub_created(self.kwargs['sub'])
def get_context_data(self, **kwargs):
context = super(SubPostListView, self).get_context_data(**kwargs)
sub = Sub.objects.get(pk=self.kwargs['sub'])
user = self.request.user
if self.kwargs['tab'] == 'followers': context['followers'] = True
context['tab_show'] = self.kwargs['tab']
context['list'] = sub
context['tab'] = self.kwargs['tab']
if self.kwargs['tab'] == 'top': context['list_url'] = '/sub/%s' % sub
else: context['list_url'] = '/sub/%s/new' % sub
context['action'] = 'follow'
if user.is_authenticated():
follow_state = SubFollow.objects.by_id(sub_followid='%s>%s' % (user.pk, sub.pk))
if follow_state: context['action'] = 'unfollow'
else: context['action'] = 'follow'
return context
class PostCommitView(CreateView):
template_name = 'layouts/post_detail.html'
form_class = CommitForm
def get_context_data(self, **kwargs):
context = super(PostCommitView, self).get_context_data(**kwargs)
pk, slug = self.kwargs['pk'], self.kwargs['slug']
context['object'] = Post.objects.by_post(pk, slug)
return context
def form_valid(self, form):
if self.request.user.is_authenticated():
user = self.request.user
post = Post.objects.get(postid=self.kwargs['pk'])
obj = form.save(commit=False)
obj.create_commit(user, post)
if not obj.post.user.pk == user.pk:
noty = Noty.objects.create(user_id=obj.post.user_id, category='C', commit=obj)
noty.create_noty()
return HttpResponseRedirect(obj.get_commit_url())
else:
commit_url = '/post/%s/%s/' % (self.kwargs['pk'], self.kwargs['slug'])
return HttpResponseRedirect('/login/?next=%s' % (commit_url))
class CreatePostView(CreateView):
template_name = 'layouts/post_create.html'
form_class = PostForm
def form_valid(self, form):
obj = form.save(commit=False)
obj.user = self.request.user
obj.save()
if obj.draft: return HttpResponseRedirect('/created')
else:
obj.user.last_commited = obj.created
obj.user.save()
obj.sub.last_commited = obj.created
obj.sub.save()
obj.last_commited = obj.created
obj.save()
return HttpResponseRedirect(obj.get_absolute_url())
class UpdatePostView(UpdateView):
template_name = 'layouts/post_create.html'
form_class = PostForm
def get_queryset(self):
return Post.objects.by_user(self.request.user)
def form_valid(self, form):
obj = form.save(commit=False)
if not obj.last_commited and not obj.draft:
now = datetime.now()
obj.last_commited = now
obj.user.last_commited = now
obj.user.save()
obj.sub.last_commited = now
obj.sub.save()
obj.save()
if obj.draft: return HttpResponseRedirect('/created')
else: return HttpResponseRedirect(obj.get_absolute_url())
class PostUserCreatedView(ListView):
template_name = 'content/post_user_created.html'
def get_queryset(self):
return Post.objects.by_user(self.request.user)
class SubFollowCreate(View):
def post(self, request, *args, **kwargs):
user = self.request.user
sub_followed = self.kwargs['followed']
sub_followed_obj = SubFollow.objects.create(follower=user, sub_id=sub_followed)
sub_followed_obj.save()
sub_followed_obj.follower.sub_following_number += 1
sub_followed_obj.follower.save()
sub_followed_obj.sub.follower_number += 1
sub_followed_obj.sub.save()
return HttpResponse(status=200)
class SubFollowDelete(View):
def post(self, request, *args, **kwargs):
sub_unfollowed = self.kwargs['unfollowed']
sub_unfollowed_obj = SubFollow.objects.get(follower=self.request.user, sub_id=sub_unfollowed)
sub_unfollowed_obj.follower.sub_following_number -= 1
sub_unfollowed_obj.follower.save()
sub_unfollowed_obj.sub.follower_number -= 1
sub_unfollowed_obj.sub.save()
sub_unfollowed_obj.delete()
return HttpResponse(status=200)
| ellipticaldoor/dfiid | project/content/views.py | Python | gpl-2.0 | 5,667 | 0.02541 |
# -*- coding: utf-8 -*-
import time
from openerp import api, models
import datetime
class ReportSampleReceivedvsReported(models.AbstractModel):
_name = 'report.olims.report_sample_received_vs_reported'
def _get_samples(self, samples):
datalines = {}
footlines = {}
total_received_count = 0
total_published_count = 0
for sample in samples:
            # For each sample, check whether it has results published
# and add it to datalines
published = False
analyses = self.env['olims.analysis_request'].search([('Sample_id', '=', sample.id)])
if analyses:
for analysis in analyses:
if not (analysis.DatePublished is False):
published = True
break
datereceived = datetime.datetime.strptime(sample.DateReceived, \
"%Y-%m-%d %H:%M:%S")
monthyear = datereceived.strftime("%B") + " " + datereceived.strftime(
"%Y")
received = 1
publishedcnt = published and 1 or 0
if (monthyear in datalines):
received = datalines[monthyear]['ReceivedCount'] + 1
publishedcnt = published and datalines[monthyear][
'PublishedCount'] + 1 or \
datalines[monthyear]['PublishedCount']
ratio = publishedcnt / received
dataline = {'MonthYear': monthyear,
'ReceivedCount': received,
'PublishedCount': publishedcnt,
'UnpublishedCount': received - publishedcnt,
'Ratio': ratio,
'RatioPercentage': '%02d' % (
100 * (float(publishedcnt) / float(received))) + '%'}
datalines[monthyear] = dataline
total_received_count += 1
total_published_count = published and total_published_count + 1 or total_published_count
# Footer total data
if total_received_count > 0:
ratio = total_published_count / total_received_count
else:
ratio = total_published_count / 1
try:
footline = {'ReceivedCount': total_received_count,
'PublishedCount': total_published_count,
'UnpublishedCount': total_received_count - total_published_count,
'Ratio': ratio,
'RatioPercentage': '%02d' % (100 * (
float(total_published_count) / float(
total_received_count))) + '%'
}
except:
footline = {'ReceivedCount': total_received_count,
'PublishedCount': total_published_count,
'UnpublishedCount': total_received_count - total_published_count,
'Ratio': ratio,
'RatioPercentage': '%02d' % (100 * (
float(total_published_count) / float(
1))) + '%'
}
footlines['Total'] = footline
return datalines, footlines
@api.multi
def render_html(self, data):
startdate = datetime.datetime.strptime(data['form'].get('date_from'), \
"%Y-%m-%d %H:%M:%S").strftime("%Y/%m/%d %H:%M:%S")
enddate = datetime.datetime.strptime(data['form'].get('date_to'), \
"%Y-%m-%d %H:%M:%S").strftime("%Y/%m/%d %H:%M:%S")
self.model = self.env.context.get('active_model')
docs = self.env[self.model].browse(self.env.context.get('active_id'))
samples = self.env['olims.sample'].search([('SamplingDate', '>=', startdate), \
('SamplingDate', '<=', enddate), \
('state', 'in', ['sample_received','expired','disposed'])])
samples_res, footlines= self.with_context(data['form'].get('used_context'))._get_samples(samples)
docargs = {
'doc_ids': self.ids,
'doc_model': self.model,
'data': data['form'],
'docs': docs,
'time': time,
'Samples': samples_res,
'footlines' : footlines #sum(samples_res.values())
}
return self.env['report'].render('olims.report_sample_received_vs_reported', docargs) | sciCloud/OLiMS | report/olims_sample_received_vs_reported.py | Python | agpl-3.0 | 4,408 | 0.009301 |
# SnippetViewPlugin - Provides a templated/abbreviation expansion mechanism for
# the editor.
#
# Copyright (C) 2006-2010 Frank Hale <frankhale@gmail.com>
#
# ##sandbox - irc.freenode.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gio
import gtk
import gtk.gdk
import gtksourceview2
import pango
import re
import datetime
import os.path
import xml.etree.ElementTree as ET
SNIPPET_XML = "snippets.xml"
MIME_ALIAS = {
"text/x-python" : ["python"],
"application/x-ruby" : ["ruby", "rails"]
}
# Change Date/Time formats as needed
DATE_FORMAT = "%B %d %Y"
TIME_FORMAT = "%H:%M"
DATE = datetime.datetime.now().strftime(DATE_FORMAT)
TIME = datetime.datetime.now().strftime(TIME_FORMAT)
DATETIME = "%s @ %s" % (datetime.datetime.now().strftime(DATE_FORMAT), datetime.datetime.now().strftime(TIME_FORMAT))
class Snippet:
def __init__(self):
self.language=""
self.shortcut=""
self.snippet=""
def mimetype(self):
        return MIME_ALIAS[self.language]
class SnippetLoader:
def load_all(self):
SNIPPETS = []
root = ET.parse(SNIPPET_XML)
for snippet in root.getiterator("snippet"):
if snippet.get("language") and snippet.get("shortcut"):
snip = Snippet()
snip.language = snippet.get("language")
snip.shortcut = snippet.get("shortcut")
snip.snippet = snippet.text.strip()
SNIPPETS.append(snip)
return SNIPPETS
def load(self, language):
all_snips = self.load_all()
return [s for s in all_snips if s.language==language]
def get_common(self):
return self.load("common")
# Common snippets that are useful regardless of document, used for built in snippets
COMMON_SNIPPETS = {
"^d" : DATE, # expands to the current date supplied by the date format above
"^t" : TIME, # expands to the current time supplied by the time format above
"^dt" : DATETIME # expands to a combination of the date and time supplied by the formats above
}
BUILT_IN_SNIPPETS = []
# For each of the common snippets make a Snippet object, plug in the key,value and add it to the built in snippets
# list
for KEY,VALUE in COMMON_SNIPPETS.items():
s = Snippet()
s.shortcut = KEY
s.snippet = VALUE
s.language = "common"
BUILT_IN_SNIPPETS.append(s)
class SnippetViewPlugin(object):
metadata = {
"name" : "Snippet Source View Plugin",
"authors" : ["Frank Hale <frankhale@gmail.com>"],
"website" : "http://github.com/frankhale/nyana",
"version" : "0.6.0",
"development status" : "beta",
"date" : "31 JULY 2007",
"enabled" : True,
"short description" : "Provides abbreviation expansion via tab key",
"long description" : "Provides a snippet feature which allows one to create abbreviations that are expandable by hitting the tab key. Special variables can be inserted into the snippets to make them tabbable and provide a quick way to create code."
}
def __init__(self, editor):
self.editor = editor
self.editor.event_manager.register_listener("buffer_change", self.event_buffer_change)
self.editor.event_manager.register_listener("scroll_to_insert", self.scroll_to_insert)
self.editor.source_view.set_highlight_current_line(True)
self.editor.source_view.set_wrap_mode(gtk.WRAP_NONE)
# regular expression used to find our special variables.
#
# variables look like ${foo}
self.variable_re = re.compile('\${([^\${}]*)}')
self.SNIPPETS = []
self.SNIPPET_MARKS = []
self.SNIPPET_OFFSETS = []
self.SNIPPET_START_MARK = None
self.SNIPPET_END_MARK = None
self.IN_SNIPPET = False
self.HAS_NO_VARIABLES=False
self.TABBED = True
self.mime_type = None
self.editor.source_view.set_show_line_numbers(True)
self.editor.source_view.set_auto_indent(True)
self.editor.source_view.set_resize_mode(gtk.RESIZE_PARENT)
### Comment this out if you don't want Monospace and want the default
### system font. Or change to suit your needs.
default_font = pango.FontDescription("Monospace 10")
if default_font:
self.editor.source_view.modify_font(default_font)
### -------------------------------------------------------- ###
self.editor.source_view.connect("key-press-event", self.key_event)
self.editor.buff.connect("mark-set", self.mark_set)
self.SL = SnippetLoader()
self.SNIPPETS.extend(self.SL.get_common())
self.SNIPPETS.extend(BUILT_IN_SNIPPETS)
# For testing purposes.
#self.syntax_highlight(os.path.abspath("/home/majyk/dev/python/test.py"))
def load_snippets(self):
types = []
try:
types = MIME_ALIAS[self.mime_type]
except KeyError:
print "This mime-type has no snippets defined"
types=None
if not types == None:
print types
if len(types)==1:
self.SNIPPETS.extend(self.SL.load(types[0]))
elif len(types)>1:
for t in types:
self.SNIPPETS.extend(self.SL.load(t))
#print "snippets available:"
#for s in self.SNIPPETS:
# print s.shortcut
def scroll_to_insert(self, parms=None):
self.editor.source_view.scroll_mark_onscreen( self.editor.buff.get_mark("insert"))
def event_buffer_change(self, parms):
if(parms.has_key("filename") and parms.has_key("text")):
self.set_text(parms["filename"], parms["text"])
def set_text(self, filename, text):
if(filename):
self.syntax_highlight(filename)
self.editor.buff.set_text(text)
self.editor.buff.place_cursor(self.editor.buff.get_start_iter())
def mark_set(self, textbuffer, _iter, textmark):
# if we are in a snippet and the user moves the cursor out of the snippet bounds without
# finishing the snippet then we need to clean up and turn the snippet mode off
if self.IN_SNIPPET and self.SNIPPET_START_MARK and self.SNIPPET_END_MARK:
SNIPPET_START_ITER = self.editor.buff.get_iter_at_mark(self.SNIPPET_START_MARK)
SNIPPET_END_ITER = self.editor.buff.get_iter_at_mark(self.SNIPPET_END_MARK)
curr_iter = self.get_cursor_iter()
if not curr_iter.in_range(SNIPPET_START_ITER, SNIPPET_END_ITER):
if self.SNIPPET_START_MARK and self.SNIPPET_END_MARK:
self.IN_SNIPPET = False
# Do mime-type magic and switch the language syntax highlight mode and snippets
def syntax_highlight(self, filename):
if not (os.path.exists(filename)):
print "(%s) does not exist" % (filename)
return
print "filename = (%s)" % (filename)
language = self.get_language(filename)
if language:
self.editor.buff.set_highlight_syntax(True)
self.editor.buff.set_language(language)
#print "Setting the snippets to the following language mime-type: " + mime_type
self.load_snippets()
else:
print "A syntax highlight mode for this mime-type does not exist."
self.editor.buff.set_highlight_syntax(False)
def complete_special_chars(self, widget, char):
curr_iter = self.editor.buff.get_iter_at_mark( self.editor.buff.get_insert() )
self.editor.buff.insert(curr_iter, char)
curr_iter = self.editor.buff.get_iter_at_mark( self.editor.buff.get_insert() )
curr_iter.backward_chars(1)
self.editor.buff.place_cursor(curr_iter)
def get_cursor_iter(self):
cursor_mark = self.editor.buff.get_insert()
cursor_iter = self.editor.buff.get_iter_at_mark(cursor_mark)
return cursor_iter
def get_line_number(self):
        cursor_iter = self.get_cursor_iter()
line_number = cursor_iter.get_line()
return line_number
# Adds marks into the buffer for the start and end offsets for each variable
def mark_variables(self, offsets):
marks = []
for o in offsets:
start_iter = self.editor.buff.get_iter_at_offset(o["start"])
end_iter = self.editor.buff.get_iter_at_offset(o["end"])
start_mark = self.editor.buff.create_mark(None, start_iter, True)
end_mark = self.editor.buff.create_mark(None, end_iter, False)
insert_mark = { "start" : start_mark,
"end" : end_mark }
marks.append(insert_mark)
return marks
# This algorithm gets variable offsets so that we can use those offsets
# to get iterators to create marks, the marks are used in order to select
# the text and move the cursor using the tab key
#
# This does a little more than just get the variable offsets, it also
# deletes the variable and replaces it with just the word identifier
#
# If the variable is a ${cursor} it will delete it and get it's start offset
# so when we mark it we can tab to a nonvisibly marked location in the snippet.
def get_variable_offsets(self,string, current_offset):
offsets = []
start_and_end_offsets = {}
# use the regular expression to get an iterator over our string
# variables will hold match objects
variables = self.variable_re.finditer(string)
for var in variables:
occur_offset_start = current_offset + var.span()[0]
occur_offset_end = current_offset + var.span()[1]
start_iter = self.editor.buff.get_iter_at_offset(occur_offset_start)
end_iter = self.editor.buff.get_iter_at_offset(occur_offset_end)
# delete the full variable
self.editor.buff.delete(start_iter, end_iter)
# if it's a ${cursor} variable we don't want to insert
# any new text. Just go to the else and get it's start
# offset, used later to mark that location
if not var.group() == "${cursor}":
# insert the variable identifier into the buffer
# at the start location
self.editor.buff.insert(start_iter, var.group(1))
current_offset = current_offset-3
# record our start and end offsets used later
# to mark these variables so we can select the text
start_and_end_offsets = {
"start" : occur_offset_start,
"end" : occur_offset_end-3
}
#print "START = %d | END = %d" % (start_and_end_offsets["start"], start_and_end_offsets["end"])
else:
# if we have a ${cursor} then we want a
# marker added with no text so we can
# tab to it.
start_and_end_offsets = {
"start" : occur_offset_start,
"end" : occur_offset_start
}
current_offset = current_offset-len(var.group())
# put the start/end offsets into a list of dictionaries
offsets.append( start_and_end_offsets )
return offsets
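# Illustrative behaviour (the snippet body is a made-up example): expanding
# "for ${item} in ${seq}:\n\t${cursor}" leaves the bare words "item" and "seq"
# in the buffer and records their start/end offsets so Tab can select them,
# while ${cursor} is deleted and recorded as a zero-width offset so the cursor
# can land there last.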
    # This function's purpose is to add spaces/tabs to the snippets according
# to what level we have indented to
def auto_indent_snippet(self, snippet):
cursor_iter = self.get_cursor_iter()
line_number = cursor_iter.get_line()
start_of_current_line_iter = self.editor.buff.get_iter_at_line(line_number)
text = self.editor.buff.get_text(cursor_iter, start_of_current_line_iter)
space_re = re.compile(' ')
tab_re = re.compile('\t')
tab_count = len(tab_re.findall(text))
space_count = len(space_re.findall(text))
lines = snippet.split("\n")
new_lines = []
tabs = ""
spaces = ""
if tab_count > 0:
for i in range(tab_count):
tabs = tabs + "\t"
if space_count > 0:
for i in range(space_count):
spaces = spaces + " "
for i,line in enumerate(lines):
# don't add any of the spaces/tabs to the first
# line in the snippet
if not i == 0:
snip = tabs + spaces + line
new_lines.append(snip)
else:
new_lines.append(line)
return "\n".join(new_lines)
def snippet_completion(self):
cursor_iter = self.get_cursor_iter()
line_number = cursor_iter.get_line()
start_of_current_line_iter = self.editor.buff.get_iter_at_line(line_number)
text = self.editor.buff.get_text(start_of_current_line_iter, cursor_iter)
words = text.split()
if words:
word_last_typed = words.pop()
word_index = text.find(word_last_typed)
# Run through all snippets trying to find a match
for s in self.SNIPPETS:
key=s.shortcut
value=s.snippet
if word_last_typed == key:
self.TABBED = True
value = self.auto_indent_snippet(value)
word_index = text.rfind(word_last_typed)
index_iter = self.editor.buff.get_iter_at_line_offset(line_number, word_index)
end_iter = self.editor.buff.get_iter_at_line_offset(line_number, word_index+len(word_last_typed))
self.editor.buff.delete(index_iter, end_iter)
overall_offset = index_iter.get_offset()
self.editor.buff.insert(index_iter, value)
start_mark_iter = self.editor.buff.get_iter_at_line_offset(line_number, word_index)
end_mark_iter = self.editor.buff.get_iter_at_offset(start_mark_iter.get_offset()+len(value))
self.SNIPPET_START_MARK = self.editor.buff.create_mark(None, start_mark_iter, True)
self.SNIPPET_END_MARK = self.editor.buff.create_mark(None, end_mark_iter, False)
offsets = self.get_variable_offsets(value, overall_offset)
if offsets:
marks = self.mark_variables(offsets)
if marks:
_iter = self.editor.buff.get_iter_at_offset( offsets[0]["start"] )
self.editor.buff.place_cursor(_iter)
marks.reverse()
for mark in marks:
self.SNIPPET_MARKS.insert(0, mark)
offsets.reverse()
for offset in offsets:
self.SNIPPET_OFFSETS.insert(0,offset)
self.IN_SNIPPET = True
else:
self.HAS_NO_VARIABLES=True
def pair_text(self, pair_chars):
selection = self.editor.buff.get_selection_bounds()
if(selection):
selected_text = self.editor.buff.get_text(selection[0], selection[1])
self.editor.buff.delete(selection[0], selection[1])
self.editor.buff.insert_at_cursor("%s%s%s" % (pair_chars[0],selected_text,pair_chars[1]))
return True
return False
def comment_line(self, comment_char):
selection = self.editor.buff.get_selection_bounds()
if(selection):
selected_text = self.editor.buff.get_text(selection[0], selection[1])
self.editor.buff.delete(selection[0], selection[1])
for line in selected_text.splitlines(True):
self.editor.buff.insert_at_cursor("%s %s" % (comment_char, line))
return True
return False
def key_event(self, widget, key_press):
keycodes = {
"space" : 32,
"tab" : 65289,
"quote" : 34,
"open_brace" : 123,
"open_bracket" : 91,
"open_paren" : 40,
"less_than" : 60,
"single_quote" : 39,
"pound" : 35
}
# Need to add a new key, just uncomment this, run the program
# and look at the output from the key press
#print key_press.keyval
if not key_press.keyval == keycodes["tab"]:
self.TABBED = False
if key_press.keyval == keycodes["pound"]:
if key_press.state & gtk.gdk.SHIFT_MASK:
comment_char = None
if(self.mime_type == ("text/x-python") or
self.mime_type == ("application/x-ruby") or
self.mime_type == ("application/x-shellscript")
):
comment_char = "#"
elif (self.mime_type == ("text/x-java") or
self.mime_type == ("text/x-c++src")
):
comment_char = "//"
if(comment_char):
if(self.comment_line(comment_char)):
return True
if key_press.keyval == keycodes["quote"]:
if (self.pair_text(["\"", "\""])):
return True
elif key_press.keyval == keycodes["open_brace"]:
if (self.pair_text(["{", "}"])):
return True
elif key_press.keyval == keycodes["open_bracket"]:
if (self.pair_text(["[", "]"])):
return True
elif key_press.keyval == keycodes["open_paren"]:
if (self.pair_text(["(", ")"])):
return True
elif key_press.keyval == keycodes["less_than"]:
if (self.pair_text(["<", ">"])):
return True
elif key_press.keyval == keycodes["single_quote"]:
if (self.pair_text(["\'", "\'"])):
return True
elif key_press.keyval == keycodes["tab"]:
if not self.TABBED:
self.snippet_completion()
if self.HAS_NO_VARIABLES:
self.HAS_NO_VARIABLES=False
return True
if(len(self.SNIPPET_MARKS)>0):
for i, v in enumerate(self.SNIPPET_MARKS):
if len(self.SNIPPET_MARKS)>1:
self.editor.source_view.scroll_mark_onscreen(self.SNIPPET_MARKS[i+1]["start"])
_iter = self.editor.buff.get_iter_at_mark(v["start"])
mark_offset = _iter.get_offset()
self.editor.buff.select_range( self.editor.buff.get_iter_at_mark(v["start"]), self.editor.buff.get_iter_at_mark(v["end"]))
self.editor.buff.delete_mark(v["start"])
self.editor.buff.delete_mark(v["end"])
del self.SNIPPET_MARKS[i]
del self.SNIPPET_OFFSETS[i]
if (i==len(self.SNIPPET_OFFSETS)):
self.IN_SNIPPET = False
self.editor.buff.delete_mark(self.SNIPPET_START_MARK)
self.editor.buff.delete_mark(self.SNIPPET_END_MARK)
break
return True
return False
def load(self):
pass
def unload(self):
pass
def __get_language_for_mime_type(self, mime):
from gtksourceview2 import language_manager_get_default
lang_manager = language_manager_get_default()
lang_ids = lang_manager.get_language_ids()
for i in lang_ids:
lang = lang_manager.get_language(i)
for m in lang.get_mime_types():
if m == mime: return lang
return None
def get_language(self, uri):
try:
if uri is None: return None
from gnomevfs import get_mime_type
self.mime_type = gio.File(uri.strip()).query_info("*").get_content_type()
language = self.__get_language_for_mime_type(self.mime_type)
except RuntimeError:
print "Caught runtime error when determining mimetype or language"
return None
return language
| frankhale/nyana | nyana/plugins/SnippetViewPlugin.py | Python | gpl-3.0 | 18,007 | 0.038929 |
# pylint:disable=consider-using-with
from typing import List, Dict
import json
import subprocess
import argparse
import tempfile
import os
import itertools
from collections import defaultdict
import angr
UNIQUE_STRING_COUNT = 20
# strings longer than MAX_UNIQUE_STRING_LEN will be truncated
MAX_UNIQUE_STRING_LEN = 70
def get_basic_info(ar_path: str) -> Dict[str,str]:
"""
Get basic information of the archive file.
"""
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
# Load arch and OS information from the first .o file
o_files = [ f for f in os.listdir(".") if f.endswith(".o") ]
if o_files:
proj = angr.Project(o_files[0], auto_load_libs=False)
arch_name = proj.arch.name.lower()
os_name = proj.simos.name.lower()
os.chdir(cwd)
return {
'arch': arch_name,
'platform': os_name,
}
def get_unique_strings(ar_path: str) -> List[str]:
"""
For Linux libraries, this method requires ar (from binutils), nm (from binutils), and strings.
"""
# get symbols
nm_output = subprocess.check_output(["nm", ar_path])
nm_lines = nm_output.decode("utf-8").split("\n")
symbols = set()
for nm_line in nm_lines:
symbol_types = "UuVvTtRrDdWwBbNn"
for symbol_type in symbol_types:
if f" {symbol_type} " in nm_line:
# parse it
symbol = nm_line[nm_line.find(f" {symbol_type}") + 3: ].strip(" ")
if "." in symbol:
symbols |= set(symbol.split("."))
else:
symbols.add(symbol)
break
# extract the archive file into a temporary directory
all_strings = set()
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
for filename in os.listdir("."):
if filename.endswith(".o"):
strings = subprocess.check_output(["strings", "-n", "8", filename])
strings = strings.decode("utf-8").split("\n")
non_symbol_strings = set()
for s in strings:
if s in symbols:
continue
if "." in s and any(subs in symbols for subs in s.split(".")):
continue
# C++ specific
if "::" in s:
continue
if "_" in s:
# make sure it's not a substring of any symbol
is_substring = False
for symbol in symbols:
if s in symbol:
is_substring = True
break
if is_substring:
continue
non_symbol_strings.add(s)
all_strings |= non_symbol_strings
os.chdir(cwd)
grouped_strings = defaultdict(set)
for s in all_strings:
grouped_strings[s[:5]].add(s)
sorted_strings = list(sorted(all_strings, key=len, reverse=True))
ctr = 0
picked = set()
unique_strings = [ ]
for s in sorted_strings:
if s[:5] in picked:
continue
unique_strings.append(s[:MAX_UNIQUE_STRING_LEN])
picked.add(s[:5])
ctr += 1
if ctr >= UNIQUE_STRING_COUNT:
break
return unique_strings
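# Illustrative call (the path is a placeholder): get_unique_strings("/tmp/libfoo.a")
# returns at most UNIQUE_STRING_COUNT strings (version banners and the like)
# that do not collide with the archive's exported symbol names.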
def run_pelf(pelf_path: str, ar_path: str, output_path: str):
subprocess.check_call([pelf_path, "-r43:0:0", ar_path, output_path])
def run_sigmake(sigmake_path: str, sig_name: str, pat_path: str, sig_path: str):
if " " not in sig_name:
sig_name_arg = f"-n{sig_name}"
else:
sig_name_arg = f"-n\"{sig_name}\""
proc = subprocess.Popen([sigmake_path, sig_name_arg, pat_path, sig_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, stderr = proc.communicate()
if b"COLLISIONS:" in stderr:
return False
return True
def process_exc_file(exc_path: str):
"""
We are doing the stupidest thing possible: For each batch of conflicts, we pick the most likely
    result based on a set of predefined rules.
TODO: Add caller-callee-based de-duplication.
"""
with open(exc_path, "r") as f:
data = f.read()
lines = data.split("\n")
# parse groups
ctr = itertools.count()
idx = 0
groups = defaultdict(dict)
for line in lines:
if line.startswith(";"):
continue
if not line:
idx = next(ctr)
else:
# parse the function name
func_name = line[:line.index("\t")].strip(" ")
groups[idx][func_name] = line
# for each group, decide the one to keep
for idx in list(groups.keys()):
g = groups[idx]
if len(g) == 1:
# don't pick anything. This is a weird case that I don't understand
continue
if all(func_name.endswith(".cold") for func_name in g):
# .cold functions. doesn't matter what we pick
continue
non_cold_names = [ ]
for func_name in g:
if func_name.endswith(".cold"):
continue
non_cold_names.append(func_name)
# sort it
non_cold_names = list(sorted(non_cold_names, key=len))
# pick the top one
the_chosen_one = non_cold_names[0]
line = g[the_chosen_one]
g[the_chosen_one] = "+" + line
# output
with open(exc_path, "w") as f:
for g in groups.values():
for line in g.values():
f.write(line + "\n")
f.write("\n")
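# Illustrative conflict group from an .exc file (names and trailing columns are
# made up).  process_exc_file() skips lines starting with ";", treats blank
# lines as group separators, reads the function name up to the first tab, and
# resolves a group by prefixing the entry to keep with "+", e.g.:
#
#   +init_foo<TAB>AB 1234 ...
#   init_foo.cold<TAB>CD 5678 ...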
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ar_path", help="Path of the .a file to build signatures for")
parser.add_argument("sig_name", help="Name of the signature (a string inside the signature file)")
parser.add_argument("sig_path", help="File name of the generated signature")
parser.add_argument("--compiler", help="Name of the compiler (e.g., gcc, clang). It will be stored in the meta "
"data file.")
parser.add_argument("--compiler_version", help="Version of the compiler (e.g., 6). It will be stored in the meta "
"data file.")
# parser.add_argument("--platform", help="Name of the platform (e.g., windows/linux/macos). It will be stored in
# the meta data file.")
parser.add_argument("--os", help="Name of the operating system (e.g., ubuntu/debian). It will be stored in the "
"meta data file.")
parser.add_argument("--os_version", help="Version of the operating system (e.g., 20.04). It will be stored in the "
"meta data file.")
parser.add_argument("--pelf_path", help="Path of pelf")
parser.add_argument("--sigmake_path", help="Path of sigmake")
args = parser.parse_args()
if args.pelf_path:
pelf_path = args.pelf_path
elif "pelf_path" in os.environ:
pelf_path = os.environ['pelf_path']
else:
raise ValueError("pelf_path must be specified.")
if args.sigmake_path:
sigmake_path = args.sigmake_path
elif "sigmake_path" in os.environ:
sigmake_path = os.environ['sigmake_path']
else:
raise ValueError("sigmake_path must be specified.")
compiler = args.compiler
if compiler:
compiler = compiler.lower()
compiler_version = args.compiler_version
if compiler_version:
compiler_version = compiler_version.lower()
os_name = args.os
if os_name:
os_name = os_name.lower()
os_version = args.os_version
if os_version:
os_version = os_version.lower()
    # Get basic information
basic_info = get_basic_info(args.ar_path)
# Get unique strings from the library
unique_strings = get_unique_strings(args.ar_path)
# Build necessary file paths
sig_path_basename = os.path.basename(args.sig_path)
if "." in sig_path_basename:
sig_dir = os.path.dirname(args.sig_path)
filename = sig_path_basename[:sig_path_basename.rfind(".")]
exc_path = os.path.join(
sig_dir,
filename + ".exc"
)
meta_path = os.path.join(
sig_dir,
filename + ".meta"
)
else:
exc_path = args.sig_path + ".exc"
meta_path = args.sig_path + ".meta"
if os.path.isfile(exc_path):
# Remove existing exc files (if there is one)
os.remove(exc_path)
# Make a temporary directory
with tempfile.TemporaryDirectory() as tmpdirname:
ar_path = args.ar_path
basename = os.path.basename(ar_path)
# sanitize basename since otherwise sigmake is not happy with it
if basename.endswith(".a"):
basename = basename[:-2]
basename = basename.replace("+", "plus")
# sanitize signame as well
sig_name = args.sig_name
sig_name = sig_name.replace("+", "plus")
pat_path = os.path.join(tmpdirname, basename + ".pat")
run_pelf(pelf_path, ar_path, pat_path)
has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
if has_collision:
process_exc_file(exc_path)
# run sigmake again
            has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
assert not has_collision
with open(meta_path, "w") as f:
metadata = {
'unique_strings': unique_strings,
}
metadata.update(basic_info)
if compiler_version:
metadata['compiler_version'] = compiler_version
if compiler:
metadata['compiler'] = compiler
if os_name:
metadata['os'] = os_name
if os_version:
metadata['os_version'] = os_version
f.write(json.dumps(metadata, indent=2))
if __name__ == "__main__":
main()
| angr/angr | angr/flirt/build_sig.py | Python | bsd-2-clause | 10,375 | 0.00241 |
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
_flatten = tkinter._flatten
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
format = "%s" if not script else "{%s}"
opts = []
for opt, value in optdict.items():
if ignore and opt in ignore:
continue
if isinstance(value, (list, tuple)):
v = []
for val in value:
if isinstance(val, str):
v.append(str(val) if val else '{}')
else:
v.append(str(val))
# format v according to the script option, but also check for
# space in any value in v in order to group them correctly
value = format % ' '.join(
('{%s}' if ' ' in val else '%s') % val for val in v)
if script and value == '':
value = '{}' # empty string in Python is equivalent to {} in Tcl
opts.append(("-%s" % opt, value))
# Remember: _flatten skips over None
return _flatten(opts)
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
format = "%s" if not script else "{%s}"
opts = []
for opt, value in mapdict.items():
opt_val = []
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
for statespec in value:
state, val = statespec[:-1], statespec[-1]
if len(state) > 1: # group multiple states
state = "{%s}" % ' '.join(state)
else: # single state
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or '{}'
if isinstance(val, (list, tuple)): # val needs to be grouped
val = "{%s}" % ' '.join(map(str, val))
opt_val.append("%s %s" % (state, val))
opts.append(("-%s" % opt, format % ' '.join(opt_val)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _format_mapdict({None: args[1:]})[1]
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _format_mapdict({None: args[2:]})[1]
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (args[1], )
if script:
spec = '{%s}' % spec
opts = ' '.join(map(str, opts))
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
    layout and ttk::style settings. Note that the layout doesn't have to
be a list necessarily.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(map(str, _format_optdict(opts, True, "children")))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.items():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(map(str, _format_optdict(opts['configure'], True)))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(map(str, _format_mapdict(opts['map'], True)))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other word, but this one makes sense
else:
s, _ = _format_layoutlist(opts['layout'])
script.append("ttk::style layout %s {\n%s\n}" % (name, s))
if opts.get('element create'): # format 'element create'
eopts = opts['element create']
etype = eopts[0]
# find where args end, and where kwargs start
argc = 1 # etype was the first one
while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
argc += 1
elemargs = eopts[1:argc]
elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
script.append("ttk::style element create %s %s %s %s" % (
name, etype, spec, opts))
return '\n'.join(script)
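# A small, illustrative settings dict accepted by _script_from_settings()
# (style and option names here are examples only):
#
#   {"TCombobox": {"configure": {"padding": 1},
#                  "map": {"fieldbackground": [("readonly", "#d9d9d9")]}}}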
def _dict_from_tcltuple(ttuple, cut_minus=True):
"""Break tuple in pairs, format it properly, then build the return
dict. If cut_minus is True, the supposed '-' prefixing options will
be removed.
ttuple is expected to contain an even number of elements."""
opt_start = 1 if cut_minus else 0
retdict = {}
it = iter(ttuple)
for opt, val in zip(it, it):
retdict[str(opt)[opt_start:]] = val
return tclobjs_to_py(retdict)
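# E.g. _dict_from_tcltuple(('-padding', '1', '-relief', 'flat')) returns
# {'padding': '1', 'relief': 'flat'} (illustrative option names and values).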
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
accepted statespec accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(val)
opts[opt] = val
return res
def _val_or_dict(options, func, *args):
"""Format options then call func with args and options and return
the appropriate result.
    If no option is specified, a dict is returned. If an option is
specified with the None value, the value for that option is returned.
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
res = func(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
return _dict_from_tcltuple(res)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
for opt, val in adict.items():
if val and hasattr(val, '__len__') and not isinstance(val, str):
if getattr(val[0], 'typename', None) == 'StateSpec':
val = _list_from_statespec(val)
else:
val = list(map(_convert_stringval, val))
elif hasattr(val, 'typename'): # some other (single) Tcl object
val = _convert_stringval(val)
adict[opt] = val
return adict
def setup_master(master=None):
"""If master is not None, itself is returned. If master is None,
the default master is returned if there is one, otherwise a new
master is created and returned.
If it is not allowed to use the default root and master is None,
RuntimeError is raised."""
if master is None:
if tkinter._support_default_root:
master = tkinter._default_root or tkinter.Tk()
else:
raise RuntimeError(
"No master specified and tkinter is "
"configured to not support default root")
return master
class Style(object):
"""Manipulate style database."""
_name = "ttk::style"
def __init__(self, master=None):
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
self.master = master
self.tk = self.master.tk
def configure(self, style, query_opt=None, **kw):
"""Query or sets the default value of the specified option(s) in
style.
Each key in kw is an option and each value is either a string or
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
return _val_or_dict(kw, self.tk.call, self._name, "configure", style)
def map(self, style, query_opt=None, **kw):
"""Query or sets dynamic values of the specified option(s) in
style.
Each key in kw is an option and each value should be a list or a
tuple (usually) containing statespecs grouped in tuples, or list,
or something else of your preference. A statespec is compound of
one or more states and then a value."""
if query_opt is not None:
return _list_from_statespec(
self.tk.call(self._name, "map", style, '-%s' % query_opt))
return _dict_from_tcltuple(
self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))
def lookup(self, style, option, state=None, default=None):
"""Returns the value specified for option in style.
If state is specified it is expected to be a sequence of one
or more states. If the default argument is set, it is used as
a fallback value in case no specification for option is found."""
state = ' '.join(state) if state else ''
return self.tk.call(self._name, "lookup", style, '-%s' % option,
state, default)
def layout(self, style, layoutspec=None):
"""Define the widget layout for given style. If layoutspec is
omitted, return the layout specification for given style.
        layoutspec is expected to be a list, or some other object different from
None that evaluates to False if you want to "turn off" that style.
If it is a list (or tuple, or something else), each item should be
a tuple where the first item is the layout name and the second item
should have the format described below:
LAYOUTS
A layout can contain the value None, if takes no options, or
a dict of options specifying how to arrange the element.
The layout mechanism uses a simplified version of the pack
geometry manager: given an initial cavity, each element is
allocated a parcel. Valid options/values are:
side: whichside
Specifies which side of the cavity to place the
element; one of top, right, bottom or left. If
omitted, the element occupies the entire cavity.
sticky: nswe
Specifies where the element is placed inside its
allocated parcel.
children: [sublayout... ]
Specifies a list of elements to place inside the
element. Each element is a tuple (or other sequence)
where the first item is the layout name, and the other
is a LAYOUT."""
lspec = None
if layoutspec:
lspec = _format_layoutlist(layoutspec)[0]
elif layoutspec is not None: # will disable the layout ({}, '', etc)
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
return _list_from_layouttuple(
self.tk.call(self._name, "layout", style, lspec))
def element_create(self, elementname, etype, *args, **kw):
"""Create a new element in the current theme of given etype."""
spec, opts = _format_elemcreate(etype, False, *args, **kw)
self.tk.call(self._name, "element", "create", elementname, etype,
spec, *opts)
def element_names(self):
"""Returns the list of elements defined in the current theme."""
return self.tk.call(self._name, "element", "names")
def element_options(self, elementname):
"""Return the list of elementname's options."""
return self.tk.call(self._name, "element", "options", elementname)
def theme_create(self, themename, parent=None, settings=None):
"""Creates a new theme.
It is an error if themename already exists. If parent is
specified, the new theme will inherit styles, elements and
layouts from the specified parent theme. If settings are present,
they are expected to have the same syntax used for theme_settings."""
script = _script_from_settings(settings) if settings else ''
if parent:
self.tk.call(self._name, "theme", "create", themename,
"-parent", parent, "-settings", script)
else:
self.tk.call(self._name, "theme", "create", themename,
"-settings", script)
def theme_settings(self, themename, settings):
"""Temporarily sets the current theme to themename, apply specified
settings and then restore the previous theme.
Each key in settings is a style and each value may contain the
keys 'configure', 'map', 'layout' and 'element create' and they
are expected to have the same format as specified by the methods
configure, map, layout and element_create respectively."""
script = _script_from_settings(settings)
self.tk.call(self._name, "theme", "settings", themename, script)
def theme_names(self):
"""Returns a list of all known themes."""
return self.tk.call(self._name, "theme", "names")
def theme_use(self, themename=None):
"""If themename is None, returns the theme in use, otherwise, set
the current theme to themename, refreshes all widgets and emits
a <<ThemeChanged>> event."""
if themename is None:
# Starting on Tk 8.6, checking this global is no longer needed
# since it allows doing self.tk.call(self._name, "theme", "use")
return self.tk.eval("return $ttk::currentTheme")
# using "ttk::setTheme" instead of "ttk::style theme use" causes
# the variable currentTheme to be updated, also, ttk::setTheme calls
# "ttk::style theme use" in order to change theme.
self.tk.call("ttk::setTheme", themename)
class Widget(tkinter.Widget):
"""Base class for Tk themed widgets."""
def __init__(self, master, widgetname, kw=None):
"""Constructs a Ttk Widget with the parent master.
STANDARD OPTIONS
class, cursor, takefocus, style
SCROLLABLE WIDGET OPTIONS
xscrollcommand, yscrollcommand
LABEL WIDGET OPTIONS
text, textvariable, underline, image, compound, width
WIDGET STATES
active, disabled, focus, pressed, selected, background,
readonly, alternate, invalid
"""
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
tkinter.Widget.__init__(self, master, widgetname, kw=kw)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the empty
string if the point does not lie within any element.
x and y are pixel coordinates relative to the widget."""
return self.tk.call(self._w, "identify", x, y)
def instate(self, statespec, callback=None, *args, **kw):
"""Test the widget's state.
If callback is not specified, returns True if the widget state
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
ret = self.tk.call(self._w, "instate", ' '.join(statespec))
if ret and callback:
return callback(*args, **kw)
return bool(ret)
def state(self, statespec=None):
"""Modify or inquire widget state.
Widget state is returned if statespec is None, otherwise it is
set according to the statespec flags and then a new state spec
is returned indicating which flags were changed. statespec is
expected to be a sequence."""
if statespec is not None:
statespec = ' '.join(statespec)
return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
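# A minimal sketch of the state/instate API described above, assuming
# application code with a Tk root and a ttk.Button named btn:
#
#   btn.state(['disabled'])       # set the disabled flag
#   btn.instate(['disabled'])     # -> True
#   btn.state(['!disabled'])      # a '!' prefix clears a flag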
class Button(Widget):
"""Ttk Button widget, displays a textual label and/or image, and
evaluates a command when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Button widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, default, width
"""
Widget.__init__(self, master, "ttk::button", kw)
def invoke(self):
"""Invokes the command associated with the button."""
return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
"""Ttk Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Checkbutton widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
def invoke(self):
"""Toggles between the selected and deselected states and
invokes the associated command. If the widget is currently
selected, sets the option variable to the offvalue option
and deselects the widget; otherwise, sets the option variable
to the option onvalue.
Returns the result of the associated command."""
return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
"""Ttk Entry widget displays a one-line text string and allows that
string to be edited by the user."""
def __init__(self, master=None, widget=None, **kw):
"""Constructs a Ttk Entry widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand
WIDGET-SPECIFIC OPTIONS
exportselection, invalidcommand, justify, show, state,
textvariable, validate, validatecommand, width
VALIDATION MODES
none, key, focus, focusin, focusout, all
"""
Widget.__init__(self, master, widget or "ttk::entry", kw)
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
return self.tk.call(self._w, "bbox", index)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the
empty string if the coordinates are outside the window."""
return self.tk.call(self._w, "identify", x, y)
def validate(self):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
return bool(self.tk.call(self._w, "validate"))
class Combobox(Entry):
"""Ttk Combobox widget combines a text field with a pop-down list of
values."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Combobox widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
# The "values" option may need special formatting, so leave to
# _format_optdict the responsibility to format it
if "values" in kw:
kw["values"] = _format_optdict({'v': kw["values"]})[1]
Entry.__init__(self, master, "ttk::combobox", **kw)
def __setitem__(self, item, value):
if item == "values":
value = _format_optdict({item: value})[1]
Entry.__setitem__(self, item, value)
def configure(self, cnf=None, **kw):
"""Custom Combobox configure, created to properly format the values
option."""
if "values" in kw:
kw["values"] = _format_optdict({'v': kw["values"]})[1]
return Entry.configure(self, cnf, **kw)
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
return self.tk.call(self._w, "current", newindex)
def set(self, value):
"""Sets the value of the combobox to value."""
self.tk.call(self._w, "set", value)
class Frame(Widget):
"""Ttk Frame widget is a container, used to group other widgets
together."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Frame with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
borderwidth, relief, padding, width, height
"""
Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
"""Ttk Label widget displays a textual label and/or image."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Label with parent master.
STANDARD OPTIONS
class, compound, cursor, image, style, takefocus, text,
textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
anchor, background, font, foreground, justify, padding,
relief, text, wraplength
"""
Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
"""Ttk Labelframe widget is a container used to group other widgets
together. It has an optional label, which may be a plain text string
or another widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Labelframe with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
labelanchor, text, underline, padding, labelwidget, width,
height
"""
Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # tkinter name compatibility
class Menubutton(Widget):
"""Ttk Menubutton widget displays a textual label and/or image, and
displays a menu when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Menubutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
direction, menu
"""
Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
"""Ttk Notebook widget manages a collection of windows and displays
a single one at a time. Each child window is associated with a tab,
which the user may select to change the currently-displayed window."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Notebook with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
height, padding, width
TAB OPTIONS
state, sticky, padding, text, image, compound, underline
TAB IDENTIFIERS (tab_id)
The tab_id argument found in several methods may take any of
the following forms:
* An integer between zero and the number of tabs
* The name of a child window
* A positional specification of the form "@x,y", which
defines the tab
* The string "current", which identifies the
currently-selected tab
* The string "end", which returns the number of tabs (only
valid for method index)
"""
Widget.__init__(self, master, "ttk::notebook", kw)
def add(self, child, **kw):
"""Adds a new tab to the notebook.
If child is currently managed by the notebook but hidden, it is
restored to its previous position."""
self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
def forget(self, tab_id):
"""Removes the tab specified by tab_id, unmaps and unmanages the
associated window."""
self.tk.call(self._w, "forget", tab_id)
def hide(self, tab_id):
"""Hides the tab specified by tab_id.
The tab will not be displayed, but the associated window remains
managed by the notebook and its configuration remembered. Hidden
tabs may be restored with the add command."""
self.tk.call(self._w, "hide", tab_id)
def identify(self, x, y):
"""Returns the name of the tab element at position x, y, or the
empty string if none."""
return self.tk.call(self._w, "identify", x, y)
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
return self.tk.call(self._w, "index", tab_id)
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified position.
pos is either the string end, an integer index, or the name of
a managed child. If child is already managed by the notebook,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def select(self, tab_id=None):
"""Selects the specified tab.
The associated child window will be displayed, and the
previously-selected window (if different) is unmapped. If tab_id
is omitted, returns the widget name of the currently selected
pane."""
return self.tk.call(self._w, "select", tab_id)
def tab(self, tab_id, option=None, **kw):
"""Query or modify the options of the specific tab_id.
If kw is not given, returns a dict of the tab option values. If option
is specified, returns the value of that option. Otherwise, sets the
options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
return self.tk.call(self._w, "tabs") or ()
def enable_traversal(self):
"""Enable keyboard traversal for a toplevel window containing
this notebook.
This will extend the bindings for the toplevel window containing
this notebook as follows:
Control-Tab: selects the tab following the currently selected
one
Shift-Control-Tab: selects the tab preceding the currently
selected one
Alt-K: where K is the mnemonic (underlined) character of any
tab, will select that tab.
Multiple notebooks in a single toplevel may be enabled for
traversal, including nested notebooks. However, notebook traversal
only works properly if all panes are direct children of the
notebook."""
# The only, and welcome, difference this makes is that mnemonics work
# after calling this method. Control-Tab and Shift-Control-Tab always
# work (here at least).
self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Panedwindow with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, width, height
PANE OPTIONS
weight
"""
Widget.__init__(self, master, "ttk::panedwindow", kw)
forget = tkinter.PanedWindow.forget # overrides Pack.forget
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
"""If newpos is specified, sets the position of sash number index.
May adjust the positions of adjacent sashes to ensure that
positions are monotonically increasing. Sash positions are further
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
return self.tk.call(self._w, "sashpos", index, newpos)
PanedWindow = Panedwindow # tkinter name compatibility
class Progressbar(Widget):
"""Ttk Progressbar widget shows the status of a long-running
operation. It can operate in two modes: determinate mode shows the
amount completed relative to the total amount of work to be done, and
indeterminate mode provides an animated display to let the user know
that something is happening."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Progressbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, length, mode, maximum, value, variable, phase
"""
Widget.__init__(self, master, "ttk::progressbar", kw)
def start(self, interval=None):
"""Begin autoincrement mode: schedules a recurring timer event
that calls method step every interval milliseconds.
interval defaults to 50 milliseconds (20 steps/second) if omitted."""
self.tk.call(self._w, "start", interval)
def step(self, amount=None):
"""Increments the value option by amount.
amount defaults to 1.0 if omitted."""
self.tk.call(self._w, "step", amount)
def stop(self):
"""Stop autoincrement mode: cancels any recurring timer event
initiated by start."""
self.tk.call(self._w, "stop")
class Radiobutton(Widget):
"""Ttk Radiobutton widgets are used in groups to show or change a
set of mutually-exclusive options."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Radiobutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, value, variable
"""
Widget.__init__(self, master, "ttk::radiobutton", kw)
def invoke(self):
"""Sets the option variable to the option value, selects the
widget, and invokes the associated command.
Returns the result of the command, or an empty string if
no command is specified."""
return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
"""Ttk Scale widget is typically used to control the numeric value of
a linked variable that varies uniformly over some range."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scale with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, from, length, orient, to, value, variable
"""
Widget.__init__(self, master, "ttk::scale", kw)
def configure(self, cnf=None, **kw):
"""Modify or query scale options.
Setting a value for any of the "from", "from_" or "to" options
generates a <<RangeChanged>> event."""
if cnf:
kw.update(cnf)
Widget.configure(self, **kw)
if any(['from' in kw, 'from_' in kw, 'to' in kw]):
self.event_generate('<<RangeChanged>>')
def get(self, x=None, y=None):
"""Get the current value of the value option, or the value
corresponding to the coordinates x, y if they are specified.
x and y are pixel coordinates relative to the scale widget
origin."""
return self.tk.call(self._w, 'get', x, y)
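# A minimal sketch of the range-change behaviour described in configure,
# assuming application code with a Tk root; names are illustrative:
#
#   var = tkinter.DoubleVar(value=0.5)
#   scale = ttk.Scale(root, from_=0, to=1, variable=var)
#   scale.configure(to=10)     # generates a <<RangeChanged>> event
#   scale.get()                # current value of the value option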
class Scrollbar(Widget, tkinter.Scrollbar):
"""Ttk Scrollbar controls the viewport of a scrollable widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scrollbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, orient
"""
Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
"""Ttk Separator widget displays a horizontal or vertical separator
bar."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Separator with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient
"""
Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
"""Ttk Sizegrip allows the user to resize the containing toplevel
window by pressing and dragging the grip."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Sizegrip with parent master.
STANDARD OPTIONS
class, cursor, state, style, takefocus
"""
Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, tkinter.XView, tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self.tk.call(self._w, "bbox", item, column)
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.call(self._w, "children", item or '') or ()
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return bool(self.tk.call(self._w, "exists", item))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
cmd = kw.get('command')
if cmd and not isinstance(cmd, str):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, 'heading', column)
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
separator: Space between two column headings.
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.call(self._w, "index", item)
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self, selop=None, items=None):
"""If selop is not specified, returns selected items."""
return self.tk.call(self._w, "selection", selop, items)
def selection_set(self, items):
"""items becomes the new selection."""
self.selection("set", items)
def selection_add(self, items):
"""Add items to the selection."""
self.selection("add", items)
def selection_remove(self, items):
"""Remove items from the selection."""
self.selection("remove", items)
def selection_toggle(self, items):
"""Toggle the selection state of each item in items."""
self.selection("toggle", items)
def set(self, item, column=None, value=None):
"""With one argument, returns a dictionary of column/value pairs
for the specified item. With two arguments, returns the current
value of the specified column. With three arguments, sets the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _dict_from_tcltuple(res, False)
else:
return res
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
"""Query or modify the options for the specified tagname.
If kw is not given, returns a dict of the option settings for tagname.
If option is specified, returns the value for that option for the
specified tagname. Otherwise, sets the options to the corresponding
values for the given tagname."""
if option is not None:
kw[option] = None
return _val_or_dict(kw, self.tk.call, self._w, "tag", "configure",
tagname)
def tag_has(self, tagname, item=None):
"""If item is specified, returns 1 or 0 depending on whether the
specified item has the given tagname. Otherwise, returns a list of
all items which have the specified tag.
* Availability: Tk 8.6"""
return self.tk.call(self._w, "tag", "has", tagname, item)
# Extensions
class LabeledScale(Frame):
"""A Ttk Scale widget with a Ttk Label widget indicating its
current value.
The Ttk Scale can be accessed through instance.scale, and Ttk Label
can be accessed through instance.label"""
def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
"""Construct an horizontal LabeledScale with parent master, a
variable to be associated with the Ttk Scale widget and its range.
If variable is not specified, a tkinter.IntVar is created.
WIDGET-SPECIFIC OPTIONS
compound: 'top' or 'bottom'
Specifies how to display the label relative to the scale.
Defaults to 'top'.
"""
self._label_top = kw.pop('compound', 'top') == 'top'
Frame.__init__(self, master, **kw)
self._variable = variable or tkinter.IntVar(master)
self._variable.set(from_)
self._last_valid = from_
self.label = Label(self)
self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
self.scale.bind('<<RangeChanged>>', self._adjust)
# position scale and label according to the compound option
scale_side = 'bottom' if self._label_top else 'top'
label_side = 'top' if scale_side == 'bottom' else 'bottom'
self.scale.pack(side=scale_side, fill='x')
tmp = Label(self).pack(side=label_side) # place holder
self.label.place(anchor='n' if label_side == 'top' else 's')
# update the label as scale or variable changes
self.__tracecb = self._variable.trace_variable('w', self._adjust)
self.bind('<Configure>', self._adjust)
self.bind('<Map>', self._adjust)
def destroy(self):
"""Destroy this widget and possibly its associated variable."""
try:
self._variable.trace_vdelete('w', self.__tracecb)
except AttributeError:
# widget has been destroyed already
pass
else:
del self._variable
Frame.destroy(self)
def _adjust(self, *args):
"""Adjust the label position according to the scale."""
def adjust_label():
self.update_idletasks() # "force" scale redraw
x, y = self.scale.coords()
if self._label_top:
y = self.scale.winfo_y() - self.label.winfo_reqheight()
else:
y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
self.label.place_configure(x=x, y=y)
from_, to = self.scale['from'], self.scale['to']
if to < from_:
from_, to = to, from_
newval = self._variable.get()
if not from_ <= newval <= to:
# value outside range, set value back to the last valid one
self.value = self._last_valid
return
self._last_valid = newval
self.label['text'] = newval
self.after_idle(adjust_label)
def _get_value(self):
"""Return current scale value."""
return self._variable.get()
def _set_value(self, val):
"""Set new scale value."""
self._variable.set(val)
value = property(_get_value, _set_value)
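# A minimal sketch of LabeledScale use, assuming application code with a
# Tk root; names are illustrative:
#
#   ls = ttk.LabeledScale(root, from_=0, to=10, compound='bottom')
#   ls.pack(fill='x')
#   ls.value = 5            # moves the scale; the label follows
#   ls.scale['to'] = 20     # picked up through <<RangeChanged>>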
class OptionMenu(Menubutton):
"""Themed OptionMenu, based after tkinter's OptionMenu, which allows
the user to select a value from a menu."""
def __init__(self, master, variable, default=None, *values, **kwargs):
"""Construct a themed OptionMenu widget with master as the parent,
the resource textvariable set to variable, the initially selected
value specified by the default parameter, the menu values given by
*values and additional keywords.
WIDGET-SPECIFIC OPTIONS
style: stylename
Menubutton style.
direction: 'above', 'below', 'left', 'right', or 'flush'
Menubutton direction.
command: callback
A callback that will be invoked after selecting an item.
"""
kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
'direction': kwargs.pop('direction', None)}
Menubutton.__init__(self, master, **kw)
self['menu'] = tkinter.Menu(self, tearoff=False)
self._variable = variable
self._callback = kwargs.pop('command', None)
if kwargs:
raise tkinter.TclError('unknown option -%s' % (
next(iter(kwargs.keys()))))
self.set_menu(default, *values)
def __getitem__(self, item):
if item == 'menu':
return self.nametowidget(Menubutton.__getitem__(self, item))
return Menubutton.__getitem__(self, item)
def set_menu(self, default=None, *values):
"""Build a new menu of radiobuttons with *values and optionally
a default value."""
menu = self['menu']
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
command=tkinter._setit(self._variable, val, self._callback))
if default:
self._variable.set(default)
def destroy(self):
"""Destroy this widget and its associated variable."""
del self._variable
Menubutton.destroy(self)
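# A minimal sketch of OptionMenu use, assuming application code with a
# Tk root; names are illustrative:
#
#   choice = tkinter.StringVar()
#   om = ttk.OptionMenu(root, choice, 'apples', 'apples', 'oranges', 'pears',
#                       command=lambda value: print('picked', value))
#   om.pack()
#   om.set_menu('oranges', 'oranges', 'bananas')   # rebuild the menu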
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/tkinter/ttk.py | Python | mit | 56,245 | 0.002471 |
from pywb.rewrite.rewrite_live import LiveRewriter
from pywb.rewrite.url_rewriter import UrlRewriter
from pywb.rewrite.wburl import WbUrl
from pywb import get_test_dir
from io import BytesIO
# This module has some rewriting tests against the 'live web'.
# As such, the content may change and the tests may break.
urlrewriter = UrlRewriter('20131226101010/http://example.com/some/path/index.html', '/pywb/')
bn_urlrewriter = UrlRewriter('20131226101010bn_/http://example.com/some/path/index.html', '/pywb/')
def head_insert_func(rule, cdx):
if rule.js_rewrite_location != 'urls':
return '<script src="/static/__pywb/wombat.js"> </script>'
else:
return ''
def test_csrf_token_headers():
rewriter = LiveRewriter()
env = {'HTTP_X_CSRFTOKEN': 'wrong', 'HTTP_COOKIE': 'csrftoken=foobar'}
req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env)
assert req_headers == {'X-CSRFToken': 'foobar', 'Cookie': 'csrftoken=foobar'}
def test_forwarded_scheme():
rewriter = LiveRewriter()
env = {'HTTP_X_FORWARDED_PROTO': 'https', 'Other': 'Value'}
req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env)
assert req_headers == {'X-Forwarded-Proto': 'http'}
def test_req_cookie_rewrite_1():
rewriter = LiveRewriter()
env = {'HTTP_COOKIE': 'A=B'}
urlkey = 'example,example,test)/'
url = 'test.example.example/'
req_headers = rewriter.translate_headers(url, urlkey, env)
assert req_headers == {'Cookie': 'A=B; FOO=&bar=1'}
def test_req_cookie_rewrite_2():
rewriter = LiveRewriter()
env = {'HTTP_COOKIE': 'FOO=goo'}
urlkey = 'example,example,test)/'
url = 'test.example.example/'
req_headers = rewriter.translate_headers(url, urlkey, env)
assert req_headers == {'Cookie': 'FOO=&bar=1'}
def test_req_cookie_rewrite_3():
rewriter = LiveRewriter()
env = {}
urlkey = 'example,example,test)/'
url = 'test.example.example/'
req_headers = rewriter.translate_headers(url, urlkey, env)
assert req_headers == {'Cookie': '; FOO=&bar=1'}
def test_local_1():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff
# JS location and JS link rewritten
assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff
# link rewritten
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_no_head():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html',
urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# location rewritten
assert 'window.WB_wombat_location = "/other.html"' in buff
# link rewritten
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_no_head_banner_only():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html',
bn_urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# location NOT rewritten
assert 'window.location = "/other.html"' in buff
# link NOT rewritten
assert '"another.html"' in buff
def test_local_banner_only_no_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
bn_urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff
# JS location NOT rewritten, JS link NOT rewritten
assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff, buff
# link NOT rewritten
assert '"another.html"' in buff
def test_local_2_link_only_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'example,example,test)/nolocation_rewrite')
# no wombat insert
assert '<head><script src="/static/__pywb/wombat.js"> </script>' not in buff
# JS location NOT rewritten, JS link rewritten
assert 'window.location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff
# still link rewrite
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_2_js_loc_only_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'example,example,test,loconly)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# JS location rewritten, JS link NOT rewritten
assert 'window.WB_wombat_location = "http:\/\/example.com/dynamic_page.html"' in buff
# still link rewrite in HTML
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_2_no_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'example,example,test,norewrite)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# JS location NOT rewritten, JS link NOT rewritten
assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff
# still link rewrite in HTML
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_unclosed_script():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_unclosed_script.html',
urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff
# JS location and JS link rewritten
assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html";\n}\n</script>' in buff, buff
def test_example_1():
status_headers, buff = get_rewritten('http://example.com/', urlrewriter, req_headers={'Connection': 'close'})
# verify header rewriting
assert (('X-Archive-Orig-connection', 'close') in status_headers.headers), status_headers
# verify utf-8 charset detection
assert status_headers.get_header('content-type') == 'text/html; charset=utf-8'
assert '/pywb/20131226101010/http://www.iana.org/domains/example' in buff, buff
def test_example_2_redirect():
status_headers, buff = get_rewritten('http://httpbin.org/redirect-to?url=http://example.com/', urlrewriter)
# redirect, no content
assert status_headers.get_statuscode() == '302'
assert len(buff) == 0
def test_example_3_rel():
status_headers, buff = get_rewritten('//example.com/', urlrewriter)
assert status_headers.get_statuscode() == '200'
def test_example_4_rewrite_err():
# may occur in case of a rewrite mismatch; the /// gets stripped off
status_headers, buff = get_rewritten('http://localhost:8080///example.com/', urlrewriter)
assert status_headers.get_statuscode() == '200'
def test_example_domain_specific_3():
status_headers, buff = get_rewritten('http://facebook.com/digitalpreservation', urlrewriter, follow_redirects=True)
# comment out Bootloader.configurePage, if it is still there
if 'Bootloader.configurePage' in buff:
assert '/* Bootloader.configurePage' in buff
def test_wombat_top():
#status_headers, buff = get_rewritten('https://assets-cdn.github.com/assets/github-0f06d0f46fe7bcfbf31f2380f23aec15ba21b8ec.js', urlrewriter)
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/toptest.js', urlrewriter)
assert 'WB_wombat_top!==window' in buff
def test_post():
buff = BytesIO(b'ABC=DEF')
env = {'REQUEST_METHOD': 'POST',
'HTTP_ORIGIN': 'http://httpbin.org',
'HTTP_HOST': 'httpbin.org',
'wsgi.input': buff}
status_headers, resp_buff = get_rewritten('http://httpbin.org/post', urlrewriter, env=env)
assert status_headers.get_statuscode() == '200', status_headers
def get_rewritten(*args, **kwargs):
return LiveRewriter().get_rewritten(remote_only=False, *args, **kwargs)
| machawk1/pywb | pywb/rewrite/test/test_rewrite_live.py | Python | gpl-3.0 | 9,395 | 0.007025 |
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Online Help Plugin"""
from spyderlib.qt.QtCore import Signal
import os.path as osp
# Local imports
from spyderlib.baseconfig import get_conf_path, _
from spyderlib.widgets.pydocgui import PydocBrowser
from spyderlib.plugins import SpyderPluginMixin
class OnlineHelp(PydocBrowser, SpyderPluginMixin):
"""
Online Help Plugin
"""
sig_option_changed = Signal(str, object)
CONF_SECTION = 'onlinehelp'
LOG_PATH = get_conf_path('.onlinehelp')
def __init__(self, parent):
self.main = parent
PydocBrowser.__init__(self, parent)
SpyderPluginMixin.__init__(self, parent)
# Initialize plugin
self.initialize_plugin()
self.register_widget_shortcuts("Editor", self.find_widget)
self.webview.set_zoom_factor(self.get_option('zoom_factor'))
self.url_combo.setMaxCount(self.get_option('max_history_entries'))
self.url_combo.addItems( self.load_history() )
#------ Public API ---------------------------------------------------------
def load_history(self, obj=None):
"""Load history from a text file in user home directory"""
if osp.isfile(self.LOG_PATH):
history = [line.replace('\n','')
for line in file(self.LOG_PATH, 'r').readlines()]
else:
history = []
return history
def save_history(self):
"""Save history to a text file in user home directory"""
file(self.LOG_PATH, 'w').write("\n".join( \
[ unicode( self.url_combo.itemText(index) )
for index in range(self.url_combo.count()) ] ))
#------ SpyderPluginMixin API ---------------------------------------------
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginMixin.visibility_changed(self, enable)
if enable and not self.is_server_running():
self.initialize()
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
return _('Online help')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
self.url_combo.lineEdit().selectAll()
return self.url_combo
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
self.save_history()
self.set_option('zoom_factor', self.webview.get_zoom_factor())
return True
def refresh_plugin(self):
"""Refresh widget"""
pass
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
return []
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.add_dockwidget(self)
| jromang/retina-old | distinclude/spyderlib/plugins/onlinehelp.py | Python | gpl-3.0 | 3,199 | 0.00813 |
"""Support for the Hive switches."""
from datetime import timedelta
from homeassistant.components.switch import SwitchEntity
from . import ATTR_AVAILABLE, ATTR_MODE, DATA_HIVE, DOMAIN, HiveEntity, refresh_system
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Hive Switch."""
if discovery_info is None:
return
hive = hass.data[DOMAIN].get(DATA_HIVE)
devices = hive.devices.get("switch")
entities = []
if devices:
for dev in devices:
entities.append(HiveDevicePlug(hive, dev))
async_add_entities(entities, True)
class HiveDevicePlug(HiveEntity, SwitchEntity):
"""Hive Active Plug."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def name(self):
"""Return the name of this Switch device if any."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
return self.device["deviceData"].get("online")
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_AVAILABLE: self.attributes.get(ATTR_AVAILABLE),
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.device["status"]["power_usage"]
@property
def is_on(self):
"""Return true if switch is on."""
return self.device["status"]["state"]
@refresh_system
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
if self.device["hiveType"] == "activeplug":
await self.hive.switch.turn_on(self.device)
@refresh_system
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
if self.device["hiveType"] == "activeplug":
await self.hive.switch.turn_off(self.device)
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.switch.get_plug(self.device)
| partofthething/home-assistant | homeassistant/components/hive/switch.py | Python | apache-2.0 | 2,449 | 0.000817 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Watch a running build job and output changes to the screen.
"""
import fcntl
import os
import select
import socket
import sys
import tempfile
import termios
import time
import traceback
from rmake import errors
from rmake.build import buildjob, buildtrove
from rmake.cmdline import query
def _getUri(client):
if not isinstance(client.uri, str) or client.uri.startswith('unix://'):
fd, tmpPath = tempfile.mkstemp()
os.close(fd)
uri = 'unix://' + tmpPath
else:
host = socket.gethostname()
uri = 'http://%s' % host
tmpPath = None
return uri, tmpPath
def monitorJob(client, jobId, showTroveDetails=False, showBuildLogs=False,
exitOnFinish=None, uri=None, serve=True, out=None,
displayClass=None):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
if not displayClass:
displayClass = JobLogDisplay
try:
display = displayClass(client, showBuildLogs=showBuildLogs, out=out,
exitOnFinish=exitOnFinish)
client = client.listenToEvents(uri, jobId, display,
showTroveDetails=showTroveDetails,
serve=serve)
return client
finally:
if serve and tmpPath:
os.remove(tmpPath)
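# A minimal sketch of how monitorJob is typically driven; `client` is assumed
# to be an already-constructed rMake client/helper object and `jobId` an
# existing job id (both hypothetical names, not defined in this module):
#
#   monitorJob(client, jobId, showTroveDetails=True, showBuildLogs=True,
#              exitOnFinish=True)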
def waitForJob(client, jobId, uri=None, serve=True):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
try:
display = SilentDisplay(client)
display._primeOutput(jobId)
return client.listenToEvents(uri, jobId, display, serve=serve)
finally:
if tmpPath:
os.remove(tmpPath)
class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=True):
self.client = client
self.finished = False
self.exitOnFinish = True # always exit when done, ignoring the exitOnFinish argument
self.showBuildLogs = showBuildLogs
if not out:
out = sys.stdout
self.out = out
def close(self):
pass
def _serveLoopHook(self):
pass
def _msg(self, msg, *args):
self.out.write('[%s] %s\n' % (time.strftime('%X'), msg))
self.out.flush()
def _jobStateUpdated(self, jobId, state, status):
isFinished = (state in (buildjob.JOB_STATE_FAILED,
buildjob.JOB_STATE_BUILT))
if isFinished:
self._setFinished()
def _setFinished(self):
self.finished = True
def _isFinished(self):
return self.finished
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def _primeOutput(self, jobId):
job = self.client.getJob(jobId, withTroves=False)
if job.isFinished():
self._setFinished()
class SilentDisplay(_AbstractDisplay):
pass
class JobLogDisplay(_AbstractDisplay):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=None):
_AbstractDisplay.__init__(self, client, out=out,
showBuildLogs=showBuildLogs,
exitOnFinish=exitOnFinish)
self.buildingTroves = {}
def _tailBuildLog(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [mark, True]
self.out.write('Tailing %s build log:\n\n' % troveTuple[0])
def _stopTailing(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [ mark, False ]
def _serveLoopHook(self):
if not self.buildingTroves:
return
for (jobId, troveTuple), (mark, tail) in self.buildingTroves.items():
if not tail:
continue
try:
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple,
mark)
except:
moreData = True
data = ''
self.out.write(data)
if not moreData:
del self.buildingTroves[jobId, troveTuple]
else:
self.buildingTroves[jobId, troveTuple][0] = mark
def _jobTrovesSet(self, jobId, troveData):
self._msg('[%d] - job troves set' % jobId)
def _jobStateUpdated(self, jobId, state, status):
_AbstractDisplay._jobStateUpdated(self, jobId, state, status)
state = buildjob.stateNames[state]
if self._isFinished():
self._serveLoopHook()
self._msg('[%d] - State: %s' % (jobId, state))
if status:
self._msg('[%d] - %s' % (jobId, status))
def _jobLogUpdated(self, jobId, state, status):
self._msg('[%d] %s' % (jobId, status))
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
isBuilding = (state in (buildtrove.TroveState.BUILDING,
buildtrove.TroveState.RESOLVING))
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - State: %s' % (jobId, troveTuple[0], state))
if status:
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
if isBuilding and self.showBuildLogs:
self._tailBuildLog(jobId, troveTuple)
else:
self._stopTailing(jobId, troveTuple)
def _troveLogUpdated(self, (jobId, troveTuple), state, status):
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
def _trovePreparingChroot(self, (jobId, troveTuple), host, path):
if host == '_local_':
msg = 'Chroot at %s' % path
else:
msg = 'Chroot at Node %s:%s' % (host, path)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg))
def _primeOutput(self, jobId):
logMark = 0
while True:
newLogs = self.client.getJobLogs(jobId, logMark)
if not newLogs:
break
logMark += len(newLogs)
for (timeStamp, message, args) in newLogs:
print '[%s] [%s] - %s' % (timeStamp, jobId, message)
BUILDING = buildtrove.TroveState.BUILDING
troveTups = self.client.listTrovesByState(jobId, BUILDING).get(BUILDING, [])
for troveTuple in troveTups:
self._tailBuildLog(jobId, troveTuple)
_AbstractDisplay._primeOutput(self, jobId)
def set_raw_mode():
fd = sys.stdin.fileno()
oldTerm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags | os.O_NONBLOCK)
return oldTerm, oldFlags
def restore_terminal(oldTerm, oldFlags):
fd = sys.stdin.fileno()
if oldTerm:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldTerm)
if oldFlags:
fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags)
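# A minimal sketch of how the two helpers above are meant to be paired so the
# terminal is always restored, even if the event loop raises:
#
#   termInfo = set_raw_mode()
#   try:
#       pass  # poll sys.stdin with select() and react to single keypresses
#   finally:
#       restore_terminal(*termInfo)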
class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None):
self.client = client
self.finished = False
self.showBuildLogs = showBuildLogs
self.troveStates = {}
self.troveIndex = None
self.troveDisplay = False
self.out = OutBuffer(out)
def close(self):
pass
def _msg(self, msg, *args):
self.out.write('\r[%s] %s\n' % (time.strftime('%X'), msg))
self.out.write('(h for help)>')
self.out.flush()
def _jobStateUpdated(self, jobId, state, status):
isFinished = (state in (buildjob.JOB_STATE_FAILED,
buildjob.JOB_STATE_BUILT))
if isFinished:
self._setFinished()
def _setFinished(self):
self.finished = True
def _isFinished(self):
return self.finished
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def _primeOutput(self, jobId):
job = self.client.getJob(jobId, withTroves=False)
if job.isFinished():
self._setFinished()
def _dispatch(self, methodname, (callData, responseHandler, args)):
if methodname.startswith('_'):
raise NoSuchMethodError(methodname)
else:
responseHandler.sendResponse('')
getattr(self, methodname)(*args)
class SilentDisplay(_AbstractDisplay):
def _updateBuildLog(self):
pass
class JobLogDisplay(_AbstractDisplay):
def __init__(self, client, state, out=None):
_AbstractDisplay.__init__(self, client, out)
self.troveToWatch = None
self.watchTroves = False
self.buildingTroves = {}
self.state = state
self.lastLen = 0
self.promptFormat = '%(jobId)s %(name)s%(context)s - %(state)s - (%(tailing)s) ([h]elp)>'
self.updatePrompt()
def close(self):
self.out.write('\n')
self.out.flush()
def _msg(self, msg, *args):
self.erasePrompt()
self.out.write('[%s] %s\n' % (time.strftime('%X'), msg))
self.writePrompt()
def updatePrompt(self):
if self.troveToWatch:
if self.troveToWatch not in self.state.troves:
self.troveToWatch = self.state.troves[0]
state = self.state.getTroveState(*self.troveToWatch)
state = buildtrove.stateNames[state]
name = self.troveToWatch[1][0].split(':', 1)[0] # remove :source
context = self.troveToWatch[1][3]
d = dict(jobId=self.troveToWatch[0], name=name, state=state,
context=(context and '{%s}' % context or ''))
else:
d = dict(jobId='(None)', name='(None)', state='', context='')
if not self.state.jobActive():
tailing = 'Job %s' % self.state.getJobStateName()
elif self.watchTroves:
tailing = 'Details on'
else:
tailing = 'Details off'
d['tailing'] = tailing
self.prompt = self.promptFormat % d
self.erasePrompt()
self.writePrompt()
def erasePrompt(self):
self.out.write('\r%s\r' % (' '*self.lastLen))
def writePrompt(self):
self.out.write(self.prompt)
self.lastLen = len(self.prompt)
self.out.flush()
def setWatchTroves(self, watchTroves=True):
self.watchTroves = watchTroves
self.updatePrompt()
def getWatchTroves(self):
return self.watchTroves
def setTroveToWatch(self, jobId, troveTuple):
self.troveToWatch = jobId, troveTuple
self.updatePrompt()
def _watchTrove(self, jobId, troveTuple):
if not self.watchTroves:
return False
return self.troveToWatch == (jobId, troveTuple)
def displayTroveStates(self):
if not self.troveToWatch:
return
self.erasePrompt()
job = self.client.getJob(self.troveToWatch[0])
query.displayTrovesByState(job, out=self.out)
self.writePrompt()
def setPrompt(self, promptFormat):
self.promptFormat = promptFormat
self.updatePrompt()
def updateBuildLog(self, jobId, troveTuple):
if not self._watchTrove(jobId, troveTuple):
return
mark = self.getMark(jobId, troveTuple)
if mark is None:
return
try:
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple,
mark)
except:
return
if data and data != '\n':
self.erasePrompt()
if data[0] == '\n':
# we've already got a \n because we've cleared
# the prompt.
data = data[1:]
self.out.write(data)
if data[-1] != '\n':
self.out.write('\n')
self.writePrompt()
if not moreData:
mark = None
self.setMark(jobId, troveTuple, mark)
def getMark(self, jobId, troveTuple):
if (jobId, troveTuple) not in self.buildingTroves:
# display max 80 lines of back log
self.buildingTroves[jobId, troveTuple] = -80
return self.buildingTroves[jobId, troveTuple]
def setMark(self, jobId, troveTuple, mark):
self.buildingTroves[jobId, troveTuple] = mark
def _jobTrovesSet(self, jobId, troveList):
self._msg('[%d] - job troves set' % jobId)
self.troveToWatch = jobId, troveList[0]
self.updatePrompt()
def _jobStateUpdated(self, jobId, state, status):
_AbstractDisplay._jobStateUpdated(self, jobId, state, status)
state = buildjob.stateNames[state]
if self._isFinished() and self.troveToWatch:
self.updateBuildLog(*self.troveToWatch)
self._msg('[%d] - State: %s' % (jobId, state))
if status:
self._msg('[%d] - %s' % (jobId, status))
self.updatePrompt()
def _jobLogUpdated(self, jobId, state, status):
self._msg('[%d] %s' % (jobId, status))
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
isBuilding = (state == buildtrove.TroveState.BUILDING)
state = buildtrove.stateNames[state]
if troveTuple[3]:
name = '%s{%s}' % (troveTuple[0], troveTuple[3])
else:
name = troveTuple[0]
self._msg('[%d] - %s - State: %s' % (jobId, name, state))
if status and self._watchTrove(jobId, troveTuple):
self._msg('[%d] - %s - %s' % (jobId, name, status))
self.updatePrompt()
def _troveLogUpdated(self, (jobId, troveTuple), state, status):
if self._watchTrove(jobId, troveTuple):
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
def _trovePreparingChroot(self, (jobId, troveTuple), host, path):
if not self._watchTrove(jobId, troveTuple):
return
if host == '_local_':
msg = 'Chroot at %s' % path
else:
msg = 'Chroot at Node %s:%s' % (host, path)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg))
class OutBuffer(object):
def __init__(self, fd):
if fd is None:
fd = sys.stdout.fileno()
elif not isinstance(fd, int):
fd = fd.fileno()
self.fd = fd
self.data = []
def write(self, data):
self.data.append(data)
def fileno(self):
return self.fd
def flush(self):
while self.data:
self.check()
def check(self):
while self.data:
ready = select.select([], [self.fd], [], 0.1)[1]
if not ready:
return
rc = os.write(self.fd, self.data[0])
if rc < len(self.data[0]):
self.data[0] = self.data[0][rc:]
else:
self.data.pop(0)
class DisplayState(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client):
self.troves = []
self.states = {}
self.buildingTroves = {}
self.jobId = None
self.client = client
self.jobState = None
def _primeOutput(self, jobId):
#assert(not self.jobId)
self.jobId = jobId
job = self.client.getJob(jobId, withTroves=False)
self.jobState = job.state
if job.isBuilding() or job.isFinished() or job.isFailed():
self.updateTrovesForJob(jobId)
def jobActive(self):
return self.jobState in (
buildjob.JOB_STATE_STARTED,
buildjob.JOB_STATE_LOADING,
buildjob.JOB_STATE_LOADED,
buildjob.JOB_STATE_BUILD,
)
def getJobStateName(self):
if self.jobState is None:
return 'None'
return buildjob.stateNames[self.jobState]
def isBuilding(self, jobId, troveTuple):
return self.getTroveState(jobId, troveTuple) in (
buildtrove.TroveState.BUILDING,
buildtrove.TroveState.PREPARING,
buildtrove.TroveState.RESOLVING)
def isFailed(self, jobId, troveTuple):
# don't iterate through unbuildable - they are failures due to
# secondary causes.
return self.getTroveState(jobId, troveTuple) in (
buildtrove.TroveState.FAILED,)
def findTroveByName(self, troveName):
startsWith = None
for jobId, troveTuple in sorted(self.states):
if troveTuple[0].split(':', 1)[0] == troveName:
# exact matches take priority
return (jobId, troveTuple)
elif troveTuple[0].startswith(troveName) and startsWith is None:
startsWith = (jobId, troveTuple)
return startsWith
def getTroveState(self, jobId, troveTuple):
return self.states[jobId, troveTuple]
def getBuildingTroves(self):
return [ x[0] for x in self.states.iteritems()
if x[1] in (buildtrove.TroveState.BUILDING,
buildtrove.TroveState.RESOLVING) ]
def updateTrovesForJob(self, jobId):
self.troves = []
self.states = {}
for state, troveTupleList in self.client.listTrovesByState(jobId).items():
for troveTuple in troveTupleList:
self.troves.append((jobId, troveTuple))
self.states[jobId, troveTuple] = state
self.troves.sort()
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
if (jobId, troveTuple) not in self.states:
self.updateTrovesForJob(jobId)
else:
self.states[jobId, troveTuple] = state
def _jobStateUpdated(self, jobId, state, status):
self.jobState = state
if self._isBuilding():
self.updateTrovesForJob(jobId)
def _jobTrovesSet(self, jobId, troveList):
self.updateTrovesForJob(jobId)
def _isBuilding(self):
return self.jobState in (buildjob.JOB_STATE_BUILD,
buildjob.JOB_STATE_STARTED)
def _isFinished(self):
return self.jobState in (
buildjob.JOB_STATE_FAILED, buildjob.JOB_STATE_BUILT)
class DisplayManager(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
displayClass = JobLogDisplay
stateClass = DisplayState
def __init__(self, client, showBuildLogs, out=None, exitOnFinish=None):
self.termInfo = set_raw_mode()
if out is None:
out = open('/dev/tty', 'w')
self.state = self.stateClass(client)
self.display = self.displayClass(client, self.state, out)
self.client = client
self.troveToWatch = None
self.troveIndex = 0
self.showBuildLogs = showBuildLogs
if exitOnFinish is None:
exitOnFinish = False
self.exitOnFinish = exitOnFinish
def _receiveEvents(self, *args, **kw):
methodname = '_receiveEvents'
method = getattr(self.state, methodname, None)
if method:
try:
method(*args)
except errors.uncatchableExceptions:
raise
except Exception, err:
print 'Error in handler: %s\n%s' % (err,
traceback.format_exc())
method = getattr(self.display, methodname, None)
if method:
try:
method(*args)
except errors.uncatchableExceptions:
raise
except Exception, err:
print 'Error in handler: %s\n%s' % (err,
traceback.format_exc())
return ''
def getCurrentTrove(self):
if self.state.troves:
return self.state.troves[self.troveIndex]
else:
return None
def _primeOutput(self, jobId):
self.state._primeOutput(jobId)
self.display._msg('Watching job %s' % jobId)
if self.getCurrentTrove():
self.displayTrove(*self.getCurrentTrove())
def displayTrove(self, jobId, troveTuple):
self.display.setTroveToWatch(jobId, troveTuple)
state = self.state.getTroveState(jobId, troveTuple)
state = buildtrove.stateNames[state]
def _serveLoopHook(self):
ready = select.select([sys.stdin], [], [], 0.1)[0]
if ready:
cmd = sys.stdin.read(1)
if cmd == '\x1b':
cmd += sys.stdin.read(2)
if cmd == ' ':
self.do_switch_log()
elif cmd == 'n' or cmd == '\x1b[C':
self.do_next()
elif cmd == 'p' or cmd == '\x1b[D':
self.do_prev()
elif cmd == 'q':
sys.exit(0)
elif cmd == 'h':
self.do_help()
elif cmd == 'b':
self.do_next_building()
elif cmd == 'f':
self.do_next_failed()
elif cmd == 'i':
self.do_info()
elif cmd == 'l':
self.do_log()
elif cmd == 's':
self.do_status()
elif cmd == 'g':
self.do_goto()
if self.showBuildLogs:
for jobId, troveTuple in self.state.getBuildingTroves():
self.display.updateBuildLog(jobId, troveTuple)
def do_next(self):
if not self.state.troves:
return
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
if self.getCurrentTrove():
self.displayTrove(*self.getCurrentTrove())
def do_next_building(self):
if not self.state.troves:
return
startIndex = self.troveIndex
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
while (not self.state.isBuilding(*self.getCurrentTrove())
and self.troveIndex != startIndex):
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
if self.troveIndex != startIndex:
self.displayTrove(*self.getCurrentTrove())
def do_goto(self):
if not self.state.troves:
print 'No troves loaded yet'
return
self.display.erasePrompt()
restore_terminal(*self.termInfo)
try:
troveName = raw_input("\nName or part of name of trove: ")
troveInfo = self.state.findTroveByName(troveName)
if not troveInfo:
print 'No trove starting with "%s"' % troveName
self.display.writePrompt()
return
while not self.getCurrentTrove() == troveInfo:
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
self.displayTrove(*self.getCurrentTrove())
finally:
self.termInfo = set_raw_mode()
def do_next_failed(self):
if not self.state.troves:
return
startIndex = self.troveIndex
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
while (not self.state.isFailed(*self.getCurrentTrove())
and self.troveIndex != startIndex):
self.troveIndex = (self.troveIndex + 1) % len(self.state.troves)
if self.troveIndex != startIndex:
self.displayTrove(*self.getCurrentTrove())
def do_prev(self):
if not self.state.troves:
return
self.troveIndex = (self.troveIndex - 1) % len(self.state.troves)
if self.getCurrentTrove():
self.displayTrove(*self.getCurrentTrove())
def do_info(self):
if not self.getCurrentTrove():
return
jobId, troveTuple = self.getCurrentTrove()
job = self.client.getJob(jobId)
trove = job.getTrove(*troveTuple)
dcfg = query.DisplayConfig(self.client, showTracebacks=True)
self.display.setWatchTroves(False)
self.display.erasePrompt()
query.displayTroveDetail(dcfg, job, trove, out=self.display.out)
self.display.writePrompt()
def do_log(self):
if not self.getCurrentTrove():
return
jobId, troveTuple = self.getCurrentTrove()
job = self.client.getJob(jobId)
trove = job.getTrove(*troveTuple)
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple, 0)
if not data:
self.display._msg('No log yet.')
return
fd, path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(data)
try:
os.system('less %s' % path)
finally:
os.remove(path)
def do_help(self):
print
print "<space>: Turn on/off tailing of log"
print "<left>/<right>: move to next/prev trove in list"
print "b: move to next building trove"
print "f: move to next failed trove"
print "g: go to a particular trove"
print "h: print help"
print "i: display info for this trove"
print "l: display log for this trove in less"
print "q: quit"
print "s: display status on all troves"
def do_status(self):
self.display.setWatchTroves(False)
self.display.displayTroveStates()
def do_switch_log(self):
self.display.setWatchTroves(not self.display.getWatchTroves())
def _isFinished(self):
return self.display._isFinished()
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def close(self):
self.display.close()
restore_terminal(*self.termInfo)
| sassoftware/rmake3 | rmake/cmdline/monitor.py | Python | apache-2.0 | 26,810 | 0.001567 |
#!/usr/bin/env python
# encoding: utf-8
class MyRange(object):
def __init__(self, n):
self.idx = 0
self.n = n
def __iter__(self):
return self
def next(self):
if self.idx < self.n:
val = self.idx
self.idx += 1
return val
else:
raise StopIteration()
myRange = MyRange(3)
for i in myRange:
print i
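# Iterating MyRange(3) prints 0, 1, 2; raising StopIteration in next() ends the loop.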
| feixiao5566/Py_Rabbic | IO/自定义迭代器.py | Python | bsd-2-clause | 402 | 0.00995 |
# -*- coding: utf-8 -*-
import os.path
import re
import warnings
try:
from setuptools import setup, find_packages
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
version = '0.2.1'
news = os.path.join(os.path.dirname(__file__), 'docs', 'news.rst')
news = open(news).read()
parts = re.split(r'([0-9\.]+)\s*\n\r?-+\n\r?', news)
found_news = ''
for i in range(len(parts)-1):
if parts[i] == version:
        found_news = parts[i+1]
break
if not found_news:
warnings.warn('No news for this version found.')
long_description = """
keepassdb is a Python library that provides functionality for reading and writing
KeePass 1.x (and KeePassX) password databases.
This library brings together work by multiple authors, including:
- Karsten-Kai König <kkoenig@posteo.de>
- Brett Viren <brett.viren@gmail.com>
- Wakayama Shirou <shirou.faw@gmail.com>
"""
if found_news:
title = 'Changes in %s' % version
long_description += "\n%s\n%s\n" % (title, '-'*len(title))
long_description += found_news
setup(
name = "keepassdb",
version = version,
author = "Hans Lellelid",
author_email = "hans@xmpl.org",
url = "http://github.com/hozn/keepassdb",
license = "GPLv3",
description = "Python library for reading and writing KeePass 1.x databases.",
long_description = long_description,
packages = find_packages(),
include_package_data=True,
package_data={'keepassdb': ['tests/resources/*']},
install_requires=['pycrypto>=2.6,<3.0dev'],
tests_require = ['nose>=1.0.3'],
test_suite = 'keepassdb.tests',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries :: Python Modules'
],
use_2to3=True,
zip_safe=False # Technically it should be fine, but there are issues w/ 2to3
)
| hozn/keepassdb | setup.py | Python | gpl-3.0 | 2,399 | 0.013344 |
#!/Users/harvey/Projects/face-hack/venv/face/bin/python
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| harveybia/face-hack | venv/face/bin/player.py | Python | mit | 2,210 | 0 |
# Authors: Ashim Bhattarai <ashimb9@gmail.com>
# Thomas J Fan <thomasjpfan@gmail.com>
# License: BSD 3 clause
import numpy as np
from ._base import _BaseImputer
from ..utils.validation import FLOAT_DTYPES
from ..metrics import pairwise_distances_chunked
from ..metrics.pairwise import _NAN_METRICS
from ..neighbors._base import _get_weights
from ..neighbors._base import _check_weights
from ..utils import is_scalar_nan
from ..utils._mask import _get_mask
from ..utils.validation import check_is_fitted
class KNNImputer(_BaseImputer):
"""Imputation for completing missing values using k-Nearest Neighbors.
Each sample's missing values are imputed using the mean value from
`n_neighbors` nearest neighbors found in the training set. Two samples are
close if the features that neither is missing are close.
Read more in the :ref:`User Guide <knnimpute>`.
.. versionadded:: 0.22
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to np.nan, since `pd.NA` will be converted to np.nan.
n_neighbors : int, default=5
Number of neighboring samples to use for imputation.
weights : {'uniform', 'distance'} or callable, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood are
weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- callable : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
metric : {'nan_euclidean'} or callable, default='nan_euclidean'
Distance metric for searching neighbors. Possible values:
- 'nan_euclidean'
- callable : a user-defined function which conforms to the definition
of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
accepts two arrays, X and Y, and a `missing_values` keyword in
`kwds` and returns a scalar distance value.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible.
add_indicator : bool, default=False
If True, a :class:`MissingIndicator` transform will stack onto the
output of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on the
missing indicator even if there are missing values at transform/test
time.
Attributes
----------
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
References
----------
* Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
no. 6, 2001 Pages 520-525.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import KNNImputer
>>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
>>> imputer = KNNImputer(n_neighbors=2)
>>> imputer.fit_transform(X)
array([[1. , 2. , 4. ],
[3. , 4. , 3. ],
[5.5, 6. , 5. ],
[8. , 8. , 7. ]])
"""
def __init__(self, *, missing_values=np.nan, n_neighbors=5,
weights="uniform", metric="nan_euclidean", copy=True,
add_indicator=False):
super().__init__(
missing_values=missing_values,
add_indicator=add_indicator
)
self.n_neighbors = n_neighbors
self.weights = weights
self.metric = metric
self.copy = copy
def _calc_impute(self, dist_pot_donors, n_neighbors,
fit_X_col, mask_fit_X_col):
"""Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values: ndarray of shape (n_receivers,)
Imputed values for receiver.
"""
# Get donors
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1,
axis=1)[:, :n_neighbors]
        # Get weight matrix from distance matrix
donors_dist = dist_pot_donors[
np.arange(donors_idx.shape[0])[:, None], donors_idx]
weight_matrix = _get_weights(donors_dist, self.weights)
# fill nans with zeros
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
# Retrieve donor values and calculate kNN average
donors = fit_X_col.take(donors_idx)
donors_mask = mask_fit_X_col.take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
return np.ma.average(donors, axis=1, weights=weight_matrix).data
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
if self.metric not in _NAN_METRICS and not callable(self.metric):
raise ValueError(
"The selected metric does not support NaN values")
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got {}".format(self.n_neighbors))
X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite,
copy=self.copy)
_check_weights(self.weights)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
super()._fit_indicator(self._mask_fit_X)
return self
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that is not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite,
copy=self.copy, reset=False)
mask = _get_mask(X, self.missing_values)
mask_fit_X = self._mask_fit_X
valid_mask = ~np.all(mask_fit_X, axis=0)
X_indicator = super()._transform_indicator(mask)
# Removes columns where the training data is all nan
if not np.any(mask):
# No missing values in X
# Remove columns where the training data is all nan
return X[:, valid_mask]
row_missing_idx = np.flatnonzero(mask.any(axis=1))
non_missing_fix_X = np.logical_not(mask_fit_X)
# Maps from indices from X to indices in dist matrix
dist_idx_map = np.zeros(X.shape[0], dtype=int)
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start:start + len(dist_chunk)]
# Find and impute missing by column
for col in range(X.shape[1]):
if not valid_mask[col]:
# column was all missing during training
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
# column has no missing values
continue
potential_donors_idx, = np.nonzero(non_missing_fix_X[:, col])
# receivers_idx are indices in X
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
# distances for samples that needed imputation for column
dist_subset = (dist_chunk[dist_idx_map[receivers_idx] - start]
[:, potential_donors_idx])
# receivers with all nan distances impute with mean
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(self._fit_X[:, col],
mask=mask_fit_X[:, col]).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
# all receivers imputed with mean
continue
# receivers with at least one defined distance
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = (dist_chunk[dist_idx_map[receivers_idx]
- start]
[:, potential_donors_idx])
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
value = self._calc_impute(
dist_subset,
n_neighbors,
self._fit_X[potential_donors_idx, col],
mask_fit_X[potential_donors_idx, col])
X[receivers_idx, col] = value
# process in fixed-memory chunks
gen = pairwise_distances_chunked(
X[row_missing_idx, :],
self._fit_X,
metric=self.metric,
missing_values=self.missing_values,
force_all_finite=force_all_finite,
reduce_func=process_chunk)
for chunk in gen:
# process_chunk modifies X in place. No return value.
pass
return super()._concatenate_indicator(X[:, valid_mask], X_indicator)
| kevin-intel/scikit-learn | sklearn/impute/_knn.py | Python | bsd-3-clause | 11,682 | 0 |
#!/usr/bin/python -Wall
# ================================================================
# Copyright (c) John Kerl 2007
# kerl.john.r@gmail.com
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
from math import *
from sackmat_m import *
import copy
# ----------------------------------------------------------------
# Let
# F: R^m -> R^n
# i.e.
# [ F_1(x_1, ..., x_m) ]
# F(x) = [ : : : ]
# [ F_n(x_1, ..., x_m) ].
# Then Dij = dFi/dxj, i=1..n, j=1..m (an n x m matrix).
# This is numerically approximated (forward-difference approximation) by
#   (F(x1,...,xj+h,...,xm) - F(x1,...,xj,...,xm)) / h
# or (centered-difference approximation)
#   (F(x1,...,xj+h/2,...,xm) - F(x1,...,xj-h/2,...,xm)) / h.
def jac(F, q, h=1e-6):
m = len(q)
n = len(F(q))
DFq = make_zero_matrix(n, m)
# Centered-difference approximation
h2 = 0.5 * h
for j in range(0, m):
qb = copy.copy(q)
qf = copy.copy(q)
qb[j] -= h2
qf[j] += h2
Fqb = F(qb)
Fqf = F(qf)
for i in range(0, n):
DFq[i][j] = (Fqf[i] - Fqb[i]) / h
return DFq
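# Usage sketch (hypothetical function G2; values are approximate):
#   def G2(q):
#       [x, y] = q
#       return [x**2 + y**2]
#   jac(G2, [1.0, 2.0]) ~= [[2.0, 4.0]], i.e. the gradient row [2x, 2y] at (1, 2).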
# ----------------------------------------------------------------
def F1(q):
[x, y, z] = q
#f1 = x**2
#f2 = y**2
#f3 = z**2
#f1 = x**2 * y**2
#f2 = y**2 * z**2
#f3 = z**2 * x**2
f1 = x * y
f2 = y * z
f3 = z * x
#f1 = 1.0 * y * y
#f2 = 2.0 * x
#f3 = 3.0 * z
return [f1, f2, f3]
# ----------------------------------------------------------------
def F2(q):
[x, y, z] = q
return [x**2 + y**2 + z**2]
# ----------------------------------------------------------------
def do_point(F,q):
print "q =", q
DFq = jac(F, q)
print "DFq="
print DFq
#print "det(DFq) =", DFq.det()
# ----------------------------------------------------------------
def do_point_with_det(F,q):
print "-" * 40
print "q =", q
DFq = jac(F, q)
print "DFq="
print DFq
print "det(DFq) =", DFq.det()
# ----------------------------------------------------------------
def frufru():
F = F1
do_point_with_det(F, [0,0,0])
print
do_point_with_det(F, [0,0,1])
do_point_with_det(F, [0,1,0])
do_point_with_det(F, [1,0,0])
print
do_point_with_det(F, [1,1,0])
do_point_with_det(F, [1,0,1])
do_point_with_det(F, [0,1,1])
print
do_point_with_det(F, [1,1,1])
do_point_with_det(F, [1,2,3])
do_point_with_det(F, [sqrt(0.5),sqrt(0.5),0])
a=0.1
do_point_with_det(F, [cos(a),sin(a),0])
a = 0.2
b = 0.3
c = sqrt(1 - a**2 - b**2)
do_point_with_det(F, [a,b,c])
a = 0.8
b = 0.2
c = sqrt(1 - a**2 - b**2)
do_point_with_det(F, [a,b,c])
print
# ----------------------------------------------------------------
def F(q):
[x, y, z] = q
#f1 = x**2
#f2 = y**2
#f3 = z**2
#f1 = x**2 * y**2
#f2 = y**2 * z**2
#f3 = z**2 * x**2
f1 = x * y
f2 = y * z
f3 = z * x
#f1 = 1.0 * y * y
#f2 = 2.0 * x
#f3 = 3.0 * z
return [f1, f2, f3]
# ----------------------------------------------------------------
def G(q):
[x, y, z] = q
return [x**2 + y**2 + z**2]
# ----------------------------------------------------------------
def gt_something():
thetalo = 0
thetahi = 2*math.pi
philo = 0
phihi = math.pi
nphi = 12
ntheta = 12
if (len(sys.argv) == 3):
nphi = int(sys.argv[1])
ntheta = int(sys.argv[2])
dtheta = (thetahi-thetalo)/ntheta
dphi = (phihi-philo)/nphi
phi = 0
for ii in range(0, nphi):
theta = 0
for jj in range(0, ntheta):
x = sin(phi) * cos(theta)
y = sin(phi) * sin(theta)
z = cos(phi)
q = [x,y,z]
DF = jac(F, q)
d = DF.det()
# Let G(x,y,z) = x^2 + y^2 + z^2. The unit sphere is the level set
# for G(x,y,z) = 1.
# Tangent plane at (u,v,w):
# dG/dx(x-u) + dG/dy(y-v) + dG/dz(z-w)
# where (u,v,w) are the coordinates of the point q and (x,y,z) are variable.
DG = jac(G, q)
# For DF restricted to this tangent plane:
# * DG (i.e. grad G) is the normal vector
# * This gives a point-normal form for the tangent plane
# * Project the standard basis for R3 onto the tangent plane
# * Row-reduce
DF = jac(F, q)
# * Form an orthonormal basis
# * Compute DF of the basis
# * Row-reduce that to get the rank of DF on TM|q
#print "q = ", q,
#print "det(DF) = ", d
#print "%7.4f %7.4f %7.4f %7.4f %7.4f,%7.4f %7.4f,%7.4f %7.4f,%7.4f" % (
# x,y,z, d, DG[0][0], -DG[0][0]*x, DG[0][1], -DG[0][1]*y, DG[0][2], -DG[0][2]*z)
nx = DG[0][0]
ny = DG[0][1]
nz = DG[0][2]
nml = [nx, ny, nz]
e0 = [1,0,0]
e1 = [0,1,0]
e2 = [0,0,1]
# Project the standard basis for R3 down to the tangent plane TM|q.
proj_e0 = projperp(e0, nml)
proj_e1 = projperp(e1, nml)
proj_e2 = projperp(e2, nml)
proj_e = sackmat([proj_e0, proj_e1, proj_e2])
# Row-reduce, compute rank, and trim
proj_e.row_echelon_form()
rank = proj_e.rank_rr()
proj_e.elements = proj_e.elements[0:rank]
# Orthonormalize
proj_e = gram_schmidt(proj_e)
#print "q=[%7.4f,%7.4f,%7.4f]" % (x, y, z),
#print "nml=[%7.4f,%7.4f,%7.4f]" % (nx, ny, nz),
#print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % (
#proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]),
# Take DF of the orthonormal basis.
proj_e = proj_e.transpose()
proj_e = DF * proj_e
proj_e = proj_e.transpose()
rank = proj_e.rank()
#print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % (
#proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]),
#print "rank=", proj_e.rank_rr(),
#print "d=%11.3e" % (d),
# xxx hack
if (rank == 1):
d = 0.7
#print "%11.3e" % (d),
print "%8.4f" % (d),
#print
theta += dtheta
print
phi += dphi
gt_something()
| johnkerl/scripts-math | pythonlib/bin/jac.py | Python | bsd-2-clause | 5,698 | 0.044577 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import WebSiteManagementClientConfiguration
from .operations import AppServiceCertificateOrdersOperations, AppServiceEnvironmentsOperations, AppServicePlansOperations, CertificateRegistrationProviderOperations, CertificatesOperations, DeletedWebAppsOperations, DiagnosticsOperations, DomainRegistrationProviderOperations, DomainsOperations, ProviderOperations, RecommendationsOperations, ResourceHealthMetadataOperations, StaticSitesOperations, TopLevelDomainsOperations, WebAppsOperations, WebSiteManagementClientOperationsMixin
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class WebSiteManagementClient(WebSiteManagementClientOperationsMixin):
"""WebSite Management Client.
:ivar app_service_certificate_orders: AppServiceCertificateOrdersOperations operations
:vartype app_service_certificate_orders:
azure.mgmt.web.v2020_06_01.aio.operations.AppServiceCertificateOrdersOperations
:ivar certificate_registration_provider: CertificateRegistrationProviderOperations operations
:vartype certificate_registration_provider:
azure.mgmt.web.v2020_06_01.aio.operations.CertificateRegistrationProviderOperations
:ivar domains: DomainsOperations operations
:vartype domains: azure.mgmt.web.v2020_06_01.aio.operations.DomainsOperations
:ivar top_level_domains: TopLevelDomainsOperations operations
:vartype top_level_domains: azure.mgmt.web.v2020_06_01.aio.operations.TopLevelDomainsOperations
:ivar domain_registration_provider: DomainRegistrationProviderOperations operations
:vartype domain_registration_provider:
azure.mgmt.web.v2020_06_01.aio.operations.DomainRegistrationProviderOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates: azure.mgmt.web.v2020_06_01.aio.operations.CertificatesOperations
:ivar deleted_web_apps: DeletedWebAppsOperations operations
:vartype deleted_web_apps: azure.mgmt.web.v2020_06_01.aio.operations.DeletedWebAppsOperations
:ivar diagnostics: DiagnosticsOperations operations
:vartype diagnostics: azure.mgmt.web.v2020_06_01.aio.operations.DiagnosticsOperations
:ivar provider: ProviderOperations operations
:vartype provider: azure.mgmt.web.v2020_06_01.aio.operations.ProviderOperations
:ivar recommendations: RecommendationsOperations operations
:vartype recommendations: azure.mgmt.web.v2020_06_01.aio.operations.RecommendationsOperations
:ivar web_apps: WebAppsOperations operations
:vartype web_apps: azure.mgmt.web.v2020_06_01.aio.operations.WebAppsOperations
:ivar static_sites: StaticSitesOperations operations
:vartype static_sites: azure.mgmt.web.v2020_06_01.aio.operations.StaticSitesOperations
:ivar app_service_environments: AppServiceEnvironmentsOperations operations
:vartype app_service_environments:
azure.mgmt.web.v2020_06_01.aio.operations.AppServiceEnvironmentsOperations
:ivar app_service_plans: AppServicePlansOperations operations
:vartype app_service_plans: azure.mgmt.web.v2020_06_01.aio.operations.AppServicePlansOperations
:ivar resource_health_metadata: ResourceHealthMetadataOperations operations
:vartype resource_health_metadata:
azure.mgmt.web.v2020_06_01.aio.operations.ResourceHealthMetadataOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g.
00000000-0000-0000-0000-000000000000).
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = WebSiteManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.app_service_certificate_orders = AppServiceCertificateOrdersOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificate_registration_provider = CertificateRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.domains = DomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.top_level_domains = TopLevelDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.domain_registration_provider = DomainRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.deleted_web_apps = DeletedWebAppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize)
self.provider = ProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.recommendations = RecommendationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.web_apps = WebAppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.static_sites = StaticSitesOperations(self._client, self._config, self._serialize, self._deserialize)
self.app_service_environments = AppServiceEnvironmentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.app_service_plans = AppServicePlansOperations(self._client, self._config, self._serialize, self._deserialize)
self.resource_health_metadata = ResourceHealthMetadataOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "WebSiteManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_06_01/aio/_web_site_management_client.py | Python | mit | 8,647 | 0.004857 |
from FortyTwo.fortytwo import *
def Start():
"""No Clue what to add here"""
| 1m0r74l17y/FortyTwo | FortyTwo/__init__.py | Python | mit | 80 | 0.0125 |
import os
from django.conf import settings
import yaafelib as yf
import wave
import contextlib
from celery import task
from sepal.datasets.models import *
from sepal.datasets.utils import filter_by_key, find_dict_by_item
@task()
def handle_uploaded_file(f):
'''Saves an uploaded data source to MEDIA_ROOT/data_sources
'''
with open(os.path.join(settings.MEDIA_ROOT, 'data_sources', f.name), 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return destination
@task()
def extract_features(dataset_id, instance_id, audiofile_path):
dataset = Dataset.objects.get(pk=dataset_id)
inst = Instance.objects.get(pk=instance_id)
n_frames, sample_rate, duration = 0, 0, 0
# Calculate the sample rate and duration
with contextlib.closing(wave.open(audiofile_path, 'r')) as audiofile:
n_frames = audiofile.getnframes()
sample_rate = audiofile.getframerate()
duration = n_frames / float(sample_rate)
    # FEATURES: list of dicts describing each feature to extract; YAAFE
    # feature-plan definitions take the form 'name: Definition'.
FEATURES = [
{'display_name': 'Spectral Shape Characteristics',
'yaafe_name': 'sss',
'yaafe_definition': 'SpectralShapeStatistics',
'subfeatures': ['Spectral centroid', 'Spectral spread', 'Spectral kurtosis', 'Spectral skewness']
},
{'display_name': 'Temporal Shape Characteristics',
'yaafe_name': 'tss',
'yaafe_definition': 'TemporalShapeStatistics',
'subfeatures': ['Temporal centroid', 'Temporal spread', 'Temporal kurtosis', 'Temporal skewness']
},
{'display_name': 'ZCR',
'yaafe_name': 'zcr',
'yaafe_definition': 'ZCR',
'unit': 'Hz'
},
{'display_name': 'Energy',
'yaafe_name': 'energy',
'yaafe_definition': 'Energy',
},
{'display_name': 'Loudness',
'yaafe_name': 'loudness',
'yaafe_definition': 'Loudness',
},
{'display_name': 'Spectral rolloff',
'yaafe_name': 'spectral_rolloff',
'yaafe_definition': 'SpectralRolloff',
},
{'display_name': 'Perceptual sharpness',
'yaafe_name': 'perceptual_sharpness',
'yaafe_definition': 'PerceptualSharpness',
},
{'display_name': 'Perceptual spread',
'yaafe_name': 'perceptual_spread',
'yaafe_definition': 'PerceptualSpread',
},
{'display_name': 'Duration',
'unit': 's',
},
{'display_name': 'Sample rate',
'unit': 'Hz',
},
{'display_name': 'Spectral decrease',
'yaafe_name': 'spectral_decrease',
'yaafe_definition': 'SpectralDecrease',
},
{'display_name': "Spectral flatness",
'yaafe_name': 'spectral_flatness',
'yaafe_definition': 'SpectralFlatness',
},
# {'display_name': "Spectral flux",
# 'yaafe_name': 'spectral_flux',
# 'yaafe_definition': 'SpectralFlux',
# },
{'display_name': "Spectral slope",
'yaafe_name': 'spectral_slope',
'yaafe_definition': 'SpectralSlope',
},
# {'display_name': "Spectral variation",
# 'yaafe_name': 'spectral_variation',
# 'yaafe_definition': 'SpectralVariation',
# }
]
# Add features to extract
feature_plan = yf.FeaturePlan(sample_rate=sample_rate, resample=False)
for feature in FEATURES:
if 'yaafe_definition' in feature:
# YAAFE feature plans take definitions of the form: 'zcr: ZCR'
full_definition = feature['yaafe_name'] + ': ' + feature['yaafe_definition']
# Add the feature to the feature plan to be extracted
feature_plan.addFeature(full_definition)
# Configure an Engine
engine = yf.Engine()
engine.load(feature_plan.getDataFlow())
# Extract features
afp = yf.AudioFileProcessor()
afp.processFile(engine, audiofile_path)
# outputs dict format - {'Spectral centroid': [[2.33], [4.34],...[2.55]]}
outputs = {}
# Read and store output arrays to outputs dict
for feature in FEATURES:
if 'yaafe_definition' in feature: # Exclude duration and sample rate
output_name = feature['yaafe_name']
# If the feature has subfeatures, e.g. Spec shape stats
if 'subfeatures' in feature:
full_output = engine.readOutput(output_name)
for i, subfeature_display_name in enumerate(feature['subfeatures']):
outputs[subfeature_display_name] = full_output[:, i]
# If the feature has only 1 dimension(1 X T array)
else:
display_name = feature['display_name']
a = engine.readOutput(output_name) # 2D array
# Transpose data to make it a 1D array
outputs[display_name] = a.transpose()[0]
# Create YAAFE feature objects
feature_obj_list = []
for display_name in outputs.keys():
feature = find_dict_by_item(('display_name', display_name), FEATURES)
f, created = Feature.objects.get_or_create(
name=display_name.lower(),
display_name=display_name
)
if feature and ('unit' in feature):
f.unit = feature['unit']
f.save()
feature_obj_list.append(f)
# Create Sample rate and Duration objects
rate_obj, created = Feature.objects.get_or_create(name='sample rate')
if not rate_obj.unit:
rate_obj.unit = 'Hz'
rate_obj.save()
feature_obj_list.append(rate_obj)
duration_obj, created = Feature.objects.get_or_create(name='duration')
if not duration_obj.unit:
duration_obj.unit = 's'
duration_obj.save()
feature_obj_list.append(duration_obj)
# Associate features with instance
# for feature in feature_obj_list:
# inst.features.add(feature)
# If dataset has labels
if dataset.labels():
# NOTE: This assumes there's only one label name per dataset.
# Just indexes the first label name
label_name = dataset.labels()[0]
else:
# attach a placeholder LabelName called 'variable'
filtered = LabelName.objects.filter(name='variable')
# make sure that 'get' doesn't return an error if there are more than 1
# LabelName called 'variable'
if len(filtered) <= 1:
label_name, c = LabelName.objects.get_or_create(name='variable')
else:
label_name = filtered[0]
# Add a placeholder label value called "none" to instance
# This is necessary in order for plotting to work
filtered = LabelValue.objects.filter(value="none", label_name=label_name)
if len(filtered) <= 1:
no_label, c = LabelValue.objects.get_or_create(value="none",
label_name=label_name)
else:
no_label = filtered[0]
inst.label_values.add(no_label)
inst.save()
# Save output data and associate it with inst
for display_name, output in outputs.iteritems():
if output.size > 0: # Avoid empty data
for i in range(output[0].size):
output_mean = output[i].mean()
FeatureValue.objects.create(value=output_mean,
feature=Feature.objects.get(name__iexact=display_name.lower()),
instance=inst)
# Save sample_rate and duration data
FeatureValue.objects.create(value=sample_rate,
feature=Feature.objects.get(name='sample rate'),
instance=inst)
FeatureValue.objects.create(value=duration,
feature=Feature.objects.get(name='duration'),
instance=inst)
| sloria/sepal | sepal/datasets/tasks.py | Python | bsd-3-clause | 8,545 | 0.007373 |
import unittest
import hashlib
import httpsig.sign as sign
from httpsig.utils import parse_authorization_header
from requests.models import RequestEncodingMixin
class CrossPlatformTestCase(unittest.TestCase):
def test_content_md5(self):
data = {'signature': "HPMOHRgPSMKdXrU6AqQs/i9S7alOakkHsJiqLGmInt05Cxj6b/WhS7kJxbIQxKmDW08YKzoFnbVZIoTI2qofEzk="}
assert RequestEncodingMixin._encode_params(data) == "signature=HPMOHRgPSMKdXrU6AqQs%2Fi9S7alOakkHsJiqLGmInt05Cxj6b%2FWhS7kJxbIQxKmDW08YKzoFnbVZIoTI2qofEzk%3D"
assert hashlib.md5(RequestEncodingMixin._encode_params(data).encode("utf-8")).hexdigest() == "fdfc1a717d2c97649f3b8b2142507129"
def test_hmac(self):
hs = sign.HeaderSigner(key_id='pda', algorithm='hmac-sha256', secret='secret', headers=['(request-target)', 'Date'])
unsigned = {
'Date': 'today',
'accept': 'llamas'
}
signed = hs.sign(unsigned, method='GET', path='/path?query=123')
auth = parse_authorization_header(signed['authorization'])
params = auth[1]
self.assertIn('keyId', params)
self.assertIn('algorithm', params)
self.assertIn('signature', params)
self.assertEqual(params['keyId'], 'pda')
self.assertEqual(params['algorithm'], 'hmac-sha256')
self.assertEqual(params['signature'], 'SFlytCGpsqb/9qYaKCQklGDvwgmrwfIERFnwt+yqPJw=')
if __name__ == "__main__":
unittest.main()
| blocktrail/blocktrail-sdk-python | tests/cross_platform_test.py | Python | mit | 1,454 | 0.003439 |
"""Resource manage module."""
import os
from .utils import RequestUtil
class ResourceAPI(object):
"""Resource wechat api."""
ADD_TEMP_URI = ('https://api.weixin.qq.com/cgi-bin/media/'
'upload?access_token={}&type={}')
@classmethod
def upload(cls, path, token, rtype, upload_type='temp'):
"""Upload resource.
:path str: Resource local path
:token str: Wechat access token
:rtype str: Resource type such as image, voice ...
:upload_type: Upload type, Now support temp and forever
"""
if not os.path.exists(path):
return False
method = getattr(cls, '_upload_{}'.format(upload_type), None)
if method:
return method(path, token, rtype)
return False
@classmethod
def _upload_temp(cls, path, token, rtype):
"""Upload temp media to wechat server.
:path str: Upload entity local path
:token str: Wechat access token
:rtype str: Upload entity type
:Return dict:
"""
uri = cls.ADD_TEMP_URI.format(token, rtype)
resp = RequestUtil.upload(uri, {}, path)
return resp
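# Usage sketch (hypothetical path and token):
#   resp = ResourceAPI.upload('/tmp/banner.jpg', access_token, 'image')
# upload() dispatches on upload_type ('temp' by default) via getattr, so an
# unknown upload_type or a missing local file makes it return False.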
| istommao/wechatkit | wechatkit/resource.py | Python | mit | 1,185 | 0 |
# coding=utf-8
from __future__ import unicode_literals, print_function
from flask import request, jsonify, url_for
from flask_login import current_user
import bugsnag
from . import load
from webhookdb.tasks.pull_request_file import spawn_page_tasks_for_pull_request_files
@load.route('/repos/<owner>/<repo>/pulls/<int:number>/files', methods=["POST"])
def pull_request_files(owner, repo, number):
"""
Queue tasks to load the pull request files (diffs) for a single pull request
into WebhookDB.
:statuscode 202: task successfully queued
"""
bugsnag_ctx = {"owner": owner, "repo": repo, "number": number}
bugsnag.configure_request(meta_data=bugsnag_ctx)
children = bool(request.args.get("children", False))
result = spawn_page_tasks_for_pull_request_files.delay(
owner, repo, number, children=children,
requestor_id=current_user.get_id(),
)
resp = jsonify({"message": "queued"})
resp.status_code = 202
resp.headers["Location"] = url_for("tasks.status", task_id=result.id)
return resp
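# Usage sketch (hypothetical owner/repo/number):
#   POST /repos/octocat/hello-world/pulls/42/files?children=1
# responds 202 Accepted with a Location header pointing at the task-status URL.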
| singingwolfboy/webhookdb | webhookdb/load/pull_request_file.py | Python | agpl-3.0 | 1,058 | 0.002836 |
import math
import pkg_resources
import itertools
import pandas as pd
import networkx as nx
from postman_problems.viz import add_node_attributes
from postman_problems.graph import (
read_edgelist, create_networkx_graph_from_edgelist, get_odd_nodes, get_shortest_paths_distances
)
from postman_problems.solver import rpp, cpp
# ###################
# PARAMETERS / DATA #
# ###################
EDGELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/edgelist_sleeping_giant.csv')
NODELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/nodelist_sleeping_giant.csv')
START_NODE = 'b_end_east'
#########
# TESTS #
#########
def test_read_sleeping_giant_edgelist():
df = read_edgelist(EDGELIST, keep_optional=True)
# check that our Sleeping Giant example dataset contains the correct fields and values
assert ['node1', 'node2', 'trail', 'color', 'distance', 'estimate', 'required'] in df.columns.values
assert math.isclose(df[df['required'] == 1]['distance'].sum(), 26.01)
assert math.isclose(df['distance'].sum(), 30.48)
df_req = read_edgelist(EDGELIST, keep_optional=False)
assert math.isclose(df_req['distance'].sum(), 26.01)
assert 'req' not in df_req.columns
def test_create_networkx_graph_from_edgelist():
df = read_edgelist(EDGELIST, keep_optional=True)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
# check that our starting graph is created correctly
assert isinstance(graph, nx.MultiGraph)
assert len(graph.edges()) == 133
assert len(graph.nodes()) == 78
assert graph['b_end_east']['b_y'][0]['color'] == 'blue'
assert graph['b_end_east']['b_y'][0]['trail'] == 'b'
assert graph['b_end_east']['b_y'][0]['distance'] == 1.32
# check that starting graph with required trails only is correct
df_req = read_edgelist(EDGELIST, keep_optional=False)
graph_req = create_networkx_graph_from_edgelist(df_req, edge_id='id')
assert isinstance(graph_req, nx.MultiGraph)
assert len(graph_req.edges()) == 121
assert len(graph_req.nodes()) == 74
def test_add_node_attributes():
# create objects for testing
df = read_edgelist(EDGELIST)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
nodelist_df = pd.read_csv(NODELIST)
graph_node_attrs = add_node_attributes(graph, nodelist_df)
assert len(graph_node_attrs.nodes()) == 74
# check that each node attribute has an X and Y coordinate
for k, v in graph_node_attrs.nodes(data=True):
assert 'X' in v
assert 'Y' in v
# spot check node attributes for first node
node_data_from_graph = list(graph_node_attrs.nodes(data=True))
node_names = [n[0] for n in node_data_from_graph]
assert 'rs_end_north' in node_names
key = node_names.index('rs_end_north')
assert node_data_from_graph[key][1]['X'] == 1772
assert node_data_from_graph[key][1]['Y'] == 172
def test_get_shortest_paths_distances():
df = read_edgelist(EDGELIST)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
odd_nodes = get_odd_nodes(graph)
odd_node_pairs = list(itertools.combinations(odd_nodes, 2))
# coarsely checking structure of `get_shortest_paths_distances` return value
odd_node_pairs_shortest_paths = get_shortest_paths_distances(graph, odd_node_pairs, 'distance')
assert len(odd_node_pairs_shortest_paths) == 630
assert type(odd_node_pairs_shortest_paths) == dict
# check that each node name appears the same number of times in `get_shortest_paths_distances` return value
node_names = list(itertools.chain(*[i[0] for i in odd_node_pairs_shortest_paths.items()]))
assert set(pd.value_counts(node_names)) == set([35])
def test_nodelist_edgelist_overlap():
"""
Test that the nodelist and the edgelist contain the same node names. If using X,Y coordinates for plotting and
not all nodes have attributes, this could get messy.
"""
eldf = read_edgelist(EDGELIST, keep_optional=True)
nldf = pd.read_csv(NODELIST)
edgelist_nodes = set(eldf['node1'].append(eldf['node2']))
nodelist_nodes = set(nldf['id'])
nodes_in_el_but_not_nl = edgelist_nodes - nodelist_nodes
assert nodes_in_el_but_not_nl == set(), \
"Warning: The following nodes are in the edgelist, but not the nodelist: {}".format(nodes_in_el_but_not_nl)
nodes_in_nl_but_not_el = nodelist_nodes - edgelist_nodes
assert nodes_in_nl_but_not_el == set(), \
"Warning: The following nodes are in the nodelist, but not the edgelist: {}".format(nodes_in_nl_but_not_el)
def test_sleeping_giant_cpp_solution():
cpp_solution, graph = cpp(edgelist_filename=EDGELIST, start_node=START_NODE)
# make number of edges in solution is correct
assert len(cpp_solution) == 155
# make sure our total mileage is correct
cpp_solution_distance = sum([edge[3]['distance'] for edge in cpp_solution])
assert math.isclose(cpp_solution_distance, 33.25)
# make sure our circuit begins and ends at the same place
assert cpp_solution[0][0] == cpp_solution[-1][1] == START_NODE
# make sure original graph is properly returned
assert len(graph.edges()) == 121
[e[2].get('augmented') for e in graph.edges(data=True)].count(True) == 35
def test_sleeping_giant_rpp_solution():
rpp_solution, graph = rpp(edgelist_filename=EDGELIST, start_node=START_NODE)
# make number of edges in solution is correct
assert len(rpp_solution) == 151
# make sure our total mileage is correct
rpp_solution_distance = sum([edge[3]['distance'] for edge in rpp_solution])
assert math.isclose(rpp_solution_distance, 32.12)
# make sure our circuit begins and ends at the same place
assert rpp_solution[0][0] == rpp_solution[-1][1] == START_NODE
# make sure original graph is properly returned
assert len(graph.edges()) == 133
[e[3].get('augmented') for e in graph.edges(data=True, keys=True)].count(True) == 30
| brooksandrew/postman_problems | postman_problems/tests/test_example_sleeping_giant.py | Python | mit | 6,009 | 0.002663 |