__id__
int64 3.09k
19,722B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
256
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 3
values | repo_name
stringlengths 5
109
| repo_url
stringlengths 24
128
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 6.65k
581M
⌀ | star_events_count
int64 0
1.17k
| fork_events_count
int64 0
154
| gha_license_id
stringclasses 16
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
5.76M
⌀ | gha_stargazers_count
int32 0
407
⌀ | gha_forks_count
int32 0
119
⌀ | gha_open_issues_count
int32 0
640
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 2
classes | gha_disabled
bool 1
class | content
stringlengths 9
4.53M
| src_encoding
stringclasses 18
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | year
int64 1.97k
2.01k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3,770,981,314,919 |
1b54e79daaefd6334695ab0a75cd27e358c56b52
|
f348f41de88c3c3a66f0e6edb25db65aeed666a4
|
/vmtools_report.py
|
9e9eb469fc4217c8ea6388d0456d8ab0e8213e0c
|
[] |
no_license
|
gfoxx/vmwaretools_report
|
https://github.com/gfoxx/vmwaretools_report
|
ccb1393fab48a141cda6a4478fab26d2330145e1
|
b3055175b73de8ebda7c0ca042c169b6cf1feaa4
|
refs/heads/master
| 2017-01-01T03:32:52.817101 | 2014-04-09T19:54:19 | 2014-04-09T19:54:19 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import logging, sys, re, getpass, argparse, pprint, csv, time
from pysphere import MORTypes, VIServer, VITask, VIProperty, VIMor, VIException
from pysphere.vi_virtual_machine import VIVirtualMachine
def print_verbose(message):
    # Print *message* only when the module-level --verbose flag is set.
    # (Python 2 print statement; `verbose` is assigned at script top level.)
    if verbose:
        print message
def set_dir():
    """Return the user-supplied output directory, falling back to /tmp."""
    return directory if directory else '/tmp'
def set_filename(filename):
    """Return the date-suffixed base name for the report file.

    Uses the user-supplied *filename* when given; otherwise falls back to
    the default 'vmware-tools-report'.
    """
    if filename:
        return filename + getDateSuffix()
    # Fixed log message: it previously claimed the default was
    # 'vsphere-inventory', which is not the name actually used below.
    logger.info('Using default filename vmware-tools-report')
    return 'vmware-tools-report' + getDateSuffix()
def getDateSuffix():
    """Return today's date as a filename suffix, e.g. '_2014-04-09'."""
    today = time.strftime("%Y-%m-%d")
    return '_' + today
def get_admin(annotation):
    """Extract the admin e-mail local part (up to and including '@')
    from a VM annotation string, or a descriptive placeholder."""
    if annotation == "":
        return "No annotations found"
    matches = re.findall(r'[\w\.-]+@', annotation)
    return matches[0] if matches else "No VM admin"
def write_vmw_tools(con, logger, c, cnt):
    """Query every VM for VMware Tools info and write one CSV row per VM.

    con    -- connected pysphere VIServer
    logger -- logger for progress/error reporting
    c      -- csv.writer for the main report
    cnt    -- csv.writer for the 'no tools installed' report, or None
    """
    # List properties to get
    property_names = ['name', 'guest.toolsVersion', 'config.files.vmPathName','config.annotation']
    try:
        logger.debug('Retrieving properties %s' % property_names )
        # Property-traversal retrieval avoids instantiating full VM objects.
        properties = con._retrieve_properties_traversal(property_names=property_names, obj_type="VirtualMachine")
        for propset in properties:
            name = ""
            tools_version = ""
            path = ""
            admin = ""
            ann = ""
            # Collect the requested properties for this VM.
            for prop in propset.PropSet:
                if prop.Name == "name":
                    name = prop.Val
                elif prop.Name == "guest.toolsVersion":
                    tools_version = prop.Val
                elif prop.Name == "config.files.vmPathName":
                    path = prop.Val
                elif prop.Name == "config.annotation":
                    ann = prop.Val
            admin = get_admin(ann)
            # '0' and empty both mean the guest reports no tools.
            if tools_version == "" or tools_version == '0':
                tools_version = "No tools installed"
                # Duplicate tool-less VMs into the separate CSV when -n is set.
                if notools:
                    if cnt is not None:
                        cnt.writerow([name, path, tools_version, admin])
                    else:
                        logger.error('Somehow CSV writer is not available.')
                        return
            if c is not None:
                c.writerow([name, path, tools_version, admin])
                logger.debug(name+path+tools_version+admin)
            else:
                logger.error('Somehow CSV writer is not available.')
                return
    except VIException as e:
        logger.error(e)
        return
def get_args():
    """Build the argparse parser for this script and return parsed args."""
    parser = argparse.ArgumentParser(description="Prints report of VMware tools version installed. ")
    parser.add_argument('-s', '--server', nargs=1, required=True, help='The vCenter or ESXi server to connect to', dest='server', type=str)
    parser.add_argument('-u', '--user', nargs=1, required=True, help='The username with which to connect to the server', dest='username', type=str)
    parser.add_argument('-p', '--password', nargs=1, required=False, help='The password with which to connect to the host. If not specified, the user is prompted at runtime for a password', dest='password', type=str)
    parser.add_argument('-n', '--notools', required=False, help='No VMware Tools isntalled into separate file.', dest='notools', action='store_true')
    parser.add_argument('-D', '--dir', required=False, help='Written file(s) into a specific directory.', dest='directory', type=str)
    parser.add_argument('-f', '--filename', required=False, help='File name. If not set, will be used vmware-tools-report.', dest='filename', type=str)
    parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
    parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
    parser.add_argument('-l', '--log-file', nargs=1, required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
    parser.add_argument('-V', '--version', action='version', version="%(prog)s (version 0.3)")
    args = parser.parse_args()
    return args
# ---------------------------------------------------------------------------
# Script entry: parse CLI arguments, configure logging and CSV outputs,
# connect to vCenter/ESXi, write the VMware Tools report, disconnect.
# ---------------------------------------------------------------------------
args = get_args()
server = args.server[0]
username = args.username[0]
verbose = args.verbose
debug = args.debug
log_file = None
password = None
notools = args.notools
directory = args.directory
filename = args.filename
# Setting output filename
csvfile = set_filename(filename)
# Setting output directory ('out_dir' rather than 'dir' to avoid shadowing the builtin)
out_dir = set_dir()
if args.password:
    password = args.password[0]
if args.logfile:
    log_file = args.logfile[0]
# Logging settings: -d beats -v; default is warnings only
if debug:
    log_level = logging.DEBUG
elif verbose:
    log_level = logging.INFO
else:
    log_level = logging.WARNING
# Initializing logger
if log_file:
    logfile = log_file + getDateSuffix() + '.log'
    logging.basicConfig(filename=logfile,format='%(asctime)s %(levelname)s %(message)s',level=log_level)
    logger = logging.getLogger(__name__)
else:
    # filename=None makes basicConfig log to stderr
    logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',level=log_level)
    logger = logging.getLogger(__name__)
logger.debug('logger initialized')
# CSV writers; header shared by both output files
c = None
cnt = None
csv_header = ["VM Name", "Datastore Path", "Vmware Tools Version", "Admin"]
try:
    c = csv.writer(open(out_dir+"/"+csvfile+".csv", "wb"))
    c.writerow(csv_header)
except IOError as e:
    # Fixed: the original caught the nonexistent name IOException, which
    # would itself raise NameError the moment an I/O error occurred.
    logger.error(e)
    sys.exit()
# Enable additional file if "notools" flag is set
try:
    if notools:
        cnt = csv.writer(open(out_dir+"/"+csvfile+"-notools.csv","wb"))
        cnt.writerow(csv_header)
except IOError as e:
    logger.error(e)
    sys.exit()
# Ask for the password interactively when it was not given on the command line
if password is None:
    logger.debug('No command line password received, requesting password from user')
    password = getpass.getpass(prompt='Enter password for vCenter %s for user %s: ' % (server,username))
# Connecting to server
logger.info('Connecting to server %s with username %s' % (server,username))
con = VIServer()
try:
    logger.debug('Trying to connect with provided credentials')
    con.connect(server,username,password)
    logger.info('Connected to server %s' % server)
    logger.debug('Server type: %s' % con.get_server_type())
    logger.debug('API version: %s' % con.get_api_version())
except VIException as ins:
    logger.error(ins)
    logger.debug('Login error. Program will exit now.')
    sys.exit()
# getting report
write_vmw_tools(con, logger, c, cnt)
logger.warning("Written CSV file to %s " % out_dir)
# disconnecting
try:
    con.disconnect()
    logger.info('Disconnected to server %s' % server)
except VIException as e:
    logger.error(e)
|
UTF-8
|
Python
| false | false | 2,014 |
14,405,320,343,598 |
2404248349ce170a630cb01a2dcaf8ce0144cd99
|
23156dc25f3ffb46fcedeffb9c6c5b330544d0ca
|
/typequiz/typing_test/models.py
|
dae29982a9059a773b18a1ab0deec438c6a73052
|
[] |
no_license
|
ckinsey/TypeQuiz
|
https://github.com/ckinsey/TypeQuiz
|
52131d83e918fcd9790e2356a3ec9e7461f9ff98
|
f5b90a650a84a67876a22710818d1fdb63e73f93
|
refs/heads/master
| 2021-01-19T06:27:34.640112 | 2013-01-14T03:49:28 | 2013-01-14T03:49:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.template.defaultfilters import slugify
# Create your models here.
class TypingTest(models.Model):
    """A typing test: a named body of text users are timed against."""

    name = models.CharField(max_length=128)
    test_body = models.TextField()
    # Optional aggregate; presumably maintained elsewhere — not set here.
    avg_time = models.IntegerField(blank=True, null=True)
    slug = models.SlugField(blank=True, null=True)
    credits = models.TextField(blank=True, null=True)

    def __unicode__(self):
        # Python 2: delegate to __str__ so both protocols agree.
        return self.__str__()

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # NOTE(review): the slug is set only on first save (no id yet), so a
        # later rename never updates it — confirm that is intended.
        if not self.id:
            # Newly created object, so set slug
            self.slug = slugify(self.name)
        super(TypingTest, self).save(*args, **kwargs)
class TestResult(models.Model):
    """One user's result for a single TypingTest run."""

    # Optional so anonymous results can be stored.
    user = models.ForeignKey('auth.User', blank=True, null=True)
    # NOTE(review): auto_now updates the stamp on every save; auto_now_add
    # may have been intended for a pure creation timestamp — confirm.
    date_created = models.DateTimeField(auto_now=True)
    user_text = models.TextField(blank=True, default="")
    total_chars = models.IntegerField()
    # Seconds (get_wpm divides by 60 to obtain minutes).
    total_time = models.IntegerField()
    errors = models.IntegerField()
    typing_test = models.ForeignKey(TypingTest)

    def get_wpm(self):
        """Gross words-per-minute using the 5-characters-per-word convention."""
        minutes = float(self.total_time) / float(60)
        words = self.total_chars / 5
        return round(float(words) / minutes, 2)

    def get_nwpm(self):
        """Net WPM: gross WPM minus a flat two-WPM penalty per error."""
        return round(self.get_wpm() - (2 * self.errors), 2)

    def __unicode__(self):
        return self.__str__()

    def __str__(self):
        return "Result #%d: %s" % (self.id, self.typing_test.name)

    def save(self, *args, **kwargs):
        # 1 sec minimum — avoids division by zero in get_wpm.
        if self.total_time == 0:
            self.total_time = 1
        super(TestResult, self).save(*args, **kwargs)
|
UTF-8
|
Python
| false | false | 2,013 |
11,467,562,704,806 |
8e9f096d22daf5422d38bd9d975bc8a5ea773980
|
c8e217ec55f3ebeef4da7999583878eec39f9404
|
/SALTLibs/RootModels/python/createNtuple.py~
|
39ee681bad18b100f0599945b86efc0bad5adff8
|
[] |
no_license
|
adendek/SALTLibs
|
https://github.com/adendek/SALTLibs
|
8d82e190ddec6cb49e4a139ab5da910dfa220fd0
|
2a85bf3472d12ffef1ebd31d7d448220a4ba794f
|
refs/heads/master
| 2016-07-28T03:42:43.288129 | 2013-08-26T02:30:20 | 2013-08-26T02:30:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/python
from ROOT import *
from array import array
def CreateNtuple(file):
    """Convert a whitespace-separated text file into a ROOT TTree.

    Reads data/txt/<file>, writes data/root/<file with .root extension>
    containing a tree 'tree' with one vector<int> entry per data line.
    """
    vector=[]
    file_in="data/txt/"+file
    f=open(file_in)
    # Swap the trailing 'txt' extension for 'root'.
    file_out="data/root/"+file[:-3]+"root"
    fout=TFile(file_out,"RECREATE")
    tuple=TTree("tree","SALTData")
    # Rebind: the placeholder list above is replaced by a std::vector<int> proxy.
    vector=std.vector(int)()
    # NOTE(review): PyROOT object branches are normally Branch(name, object);
    # the extra 'vector<int>' string argument here looks suspect — verify.
    tuple.Branch("vector",'vector<int>',vector)
    for line in f:
        # Skip comment lines starting with '#'.
        # NOTE(review): a fully blank line would raise IndexError here.
        if line.split()[0][0]!='#' :
            for i in line.split():
                vector.push_back(int(i))
            tuple.Fill()
            vector.clear()
    fout.Write()
    fout.Close()
    f.close()
def createReader(file):
    """Open the converted ROOT file and fetch its 'tree' TTree.

    NOTE(review): both the TFile and the tree are local and not returned,
    so they are dropped on return — probably meant to return them; confirm.
    """
    file_in="data/root/"+file[:-3]+"root"
    f=TFile(file_in)
    tree=gDirectory.Get("tree")
|
UTF-8
|
Python
| false | false | 2,013 |
14,431,090,139,153 |
31abf3a7e2aa8b72d589a79fe8e9bb0234424ab7
|
be6d5ffe8541fbb37d65a58a812ce6857e20b275
|
/camera/Pilatus.py
|
fe51dc7cdeea89ab6306f0c4a64fedca2e5f71bb
|
[] |
no_license
|
gjover/Lima-tango
|
https://github.com/gjover/Lima-tango
|
a1a514c7dbe1240b878bbd7dee2ad987451cce9f
|
e251a22689b2e4e206a41b28a24e93da743842cb
|
refs/heads/master
| 2021-01-18T15:02:22.272668 | 2012-11-22T08:49:42 | 2012-11-22T08:49:42 | 6,594,649 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
############################################################################
# This file is part of LImA, a Library for Image Acquisition
#
# Copyright (C) : 2009-2011
# European Synchrotron Radiation Facility
# BP 220, Grenoble 38043
# FRANCE
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
############################################################################
#=============================================================================
#
# file : Pilatus.py
#
# description : Python source for the Pilatus and its commands.
# The class is derived from Device. It represents the
# CORBA servant object which will be accessed from the
# network. All commands which can be executed on the
# Pilatus are implemented in this file.
#
# project : TANGO Device Server
#
# copyleft : European Synchrotron Radiation Facility
# BP 220, Grenoble 38043
# FRANCE
#
#=============================================================================
# This file is generated by POGO
# (Program Obviously used to Generate tango Object)
#
# (c) - Software Engineering Group - ESRF
#=============================================================================
#
import PyTango
import sys
from Lima import Core
#==================================================================
# Pilatus Class Description:
#
#
#==================================================================
class Pilatus(PyTango.Device_4Impl):
    """Tango device exposing a Pilatus detector through the Lima plugin.

    All attribute read/write methods talk to the camserver via the
    communication object of the module-level _PilatusIterface singleton
    (created lazily by get_control()).
    """
    #--------- Add you global variables here --------------------------
    Core.DEB_CLASS(Core.DebModApplication, 'LimaCCDs')

    #------------------------------------------------------------------
    # Device constructor
    #------------------------------------------------------------------
    def __init__(self,cl, name):
        PyTango.Device_4Impl.__init__(self,cl,name)
        self.init_device()
        # Maps from the Tango string attribute values to the values the
        # Lima communication layer expects.
        self.__FillMode = {'ON':True,
                           'OFF':False}
        self.__ThresholdGain = {'LOW' : 0,
                                'MID' : 1,
                                'HIGH' : 2,
                                'ULTRA HIGH' : 3}

    #------------------------------------------------------------------
    # Device destructor
    #------------------------------------------------------------------
    def delete_device(self):
        pass

    #------------------------------------------------------------------
    # Device initialization
    #------------------------------------------------------------------
    def init_device(self):
        self.set_state(PyTango.DevState.ON)
        self.get_device_properties(self.get_device_class())
        # Optional TmpfsSize device property (MB): cap the size of the
        # temp filesystem the camserver writes images to.
        if self.TmpfsSize:
            buffer = _PilatusIterface.buffer()
            buffer.setTmpfsSize(self.TmpfsSize * 1024 * 1024)

    #------------------------------------------------------------------
    # getAttrStringValueList command:
    #
    # Description: return a list of authorized values if any
    # argout: DevVarStringArray
    #------------------------------------------------------------------
    @Core.DEB_MEMBER_FUNCT
    def getAttrStringValueList(self, attr_name):
        # Derive the name-mangled dict attribute for this Tango attribute,
        # e.g. 'threshold_gain' -> '_Pilatus__ThresholdGain', and return its
        # keys when such a dict exists (empty list otherwise).
        valueList = []
        dict_name = '_' + self.__class__.__name__ + '__' + ''.join([x.title() for x in attr_name.split('_')])
        d = getattr(self,dict_name,None)
        if d:
            valueList = d.keys()
        return valueList

    #------------------------------------------------------------------
    # sendCamserverCmd:
    #
    # Description: sends any camserver command.
    #
    # We need at least the mxsettings command to prepare the camserver:
    # Description: sets the mxsettings. Documentation from camserver:
    # Set crystallographic parameters reported in the image header.
    #
    # mxsettings [param_name value] [param_name value] ...
    #
    # List of availables param_name :
    # Wavelength, Energy_range, Detector_distance, Detector_Voffset, Beam_xy,
    # Beam_x, Beam_y, Flux, Filter_transmission, Start_angle, Angle_increment,
    # Detector_2theta, Polarization, Alpha, Kappa, Phi, Phi_increment, Chi,
    # Chi_increment, Oscillation_axis, N_oscillations, Start_position,
    # Position_increment, Shutter_time, CBF_template_file
    #
    # Not settable with mxsettings, but provided to templates from detector
    # settings:
    # Timestamp, Exposure_period, Exposure_time, Count_cutoff,
    # Compression_type, X_dimension, Y_dimension
    #
    # argin: DevString
    #------------------------------------------------------------------
    @Core.DEB_MEMBER_FUNCT
    def sendCamserverCmd(self, cmd):
        """Forward *cmd* verbatim to the camserver."""
        communication = _PilatusIterface.communication()
        communication.send_CamserverCmd(cmd)

    #==================================================================
    #
    # Pilatus read/write attribute methods
    #
    #==================================================================
    #------------------------------------------------------------------
    # Read threshold_gain attribute
    #------------------------------------------------------------------
    def read_threshold_gain(self, attr):
        communication = _PilatusIterface.communication()
        gain = communication.gain()
        if gain is None:
            gain = "not set"
        else:
            # Reverse-map the numeric gain back to its Tango string label.
            gain = _getDictKey(self.__ThresholdGain,gain)
        attr.set_value(gain)

    #------------------------------------------------------------------
    # Write threshold_gain attribute
    #------------------------------------------------------------------
    def write_threshold_gain(self, attr):
        data = []
        attr.get_write_value(data)
        gain = _getDictValue(self.__ThresholdGain,data[0])
        communication = _PilatusIterface.communication()
        # Keep the current threshold, change only the gain.
        threshold = communication.threshold()
        communication.set_threshold_gain(threshold,gain)

    #------------------------------------------------------------------
    # Read threshold attribute
    #------------------------------------------------------------------
    def read_threshold(self, attr):
        communication = _PilatusIterface.communication()
        threshold = communication.threshold()
        if threshold == None: # Not set
            threshold = -1
        attr.set_value(threshold)

    #------------------------------------------------------------------
    # Write threshold attribute
    #------------------------------------------------------------------
    def write_threshold(self, attr):
        data = []
        attr.get_write_value(data)
        communication = _PilatusIterface.communication()
        # NOTE(review): called with the threshold only, while other writers
        # pass (threshold, gain) — verify the single-argument form is valid.
        communication.set_threshold_gain(data[0])

    #------------------------------------------------------------------
    # Read energy_threshold attribute
    #------------------------------------------------------------------
    def read_energy_threshold(self, attr):
        communication = _PilatusIterface.communication()
        threshold = communication.threshold()
        if threshold == None: # Not set
            energy = -1
        else:
            energy = threshold * 2. # threshold is 60% of working energy
        attr.set_value(energy)

    #------------------------------------------------------------------
    # Write energy_threshold attribute
    #------------------------------------------------------------------
    def write_energy_threshold(self, attr):
        data = []
        attr.get_write_value(data)
        energy = data[0]
        communication = _PilatusIterface.communication()
        communication.set_energy(energy)
        # Superseded manual threshold/gain selection, kept for reference:
        # threshold = energy * 600 # 60% of working energy
        # if energy > 12 :
        #     gain = 0 # Low gain
        # elif energy > 8 and energy <= 12 :
        #     gain = 1 # Mid gain
        # elif energy >= 6 and energy <= 8:
        #     gain = 2 # high gain
        # else:
        #     gain = 3 # Ultra high gain
        # communication = _PilatusIterface.communication()
        # communication.set_threshold_gain(threshold,gain)

    #----------------------------------------------------------------------------
    # Read delay attribute
    #----------------------------------------------------------------------------
    def read_trigger_delay(self,attr) :
        communication = _PilatusIterface.communication()
        delay = communication.hardware_trigger_delay()
        attr.set_value(delay)

    #----------------------------------------------------------------------------
    # Write delay attribute
    #----------------------------------------------------------------------------
    def write_trigger_delay(self,attr) :
        data = []
        attr.get_write_value(data)
        delay = data[0]
        communication = _PilatusIterface.communication()
        communication.set_hardware_trigger_delay(delay)

    #----------------------------------------------------------------------------
    # Read nb exposure per frame attribute
    #----------------------------------------------------------------------------
    def read_nb_exposure_per_frame(self,attr) :
        communication = _PilatusIterface.communication()
        nb_frames = communication.nb_exposure_per_frame()
        attr.set_value(nb_frames)

    #----------------------------------------------------------------------------
    # Write nb exposure per frame attribute
    #----------------------------------------------------------------------------
    def write_nb_exposure_per_frame(self,attr) :
        data = []
        attr.get_write_value(data)
        nb_frames = data[0]
        communication = _PilatusIterface.communication()
        communication.set_nb_exposure_per_frame(nb_frames)

    #----------------------------------------------------------------------------
    # Read first image number attribute
    #----------------------------------------------------------------------------
    def read_nb_first_image(self,attr) :
        communication = _PilatusIterface.communication()
        first = communication.getFirstImgNumber()
        attr.set_value(first)

    #----------------------------------------------------------------------------
    # Write first image number attribute
    #----------------------------------------------------------------------------
    def write_nb_first_image(self,attr) :
        data = []
        attr.get_write_value(data)
        first = data[0]
        communication = _PilatusIterface.communication()
        communication.setFirstImgNumber(first)

    #------------------------------------------------------------------
    # Read gapfill attribute
    #------------------------------------------------------------------
    def read_fill_mode(self, attr):
        communication = _PilatusIterface.communication()
        gapfill = communication.gapfill()
        # Reverse-map the boolean to its 'ON'/'OFF' Tango label.
        gapfill = _getDictKey(self.__FillMode,gapfill)
        attr.set_value(gapfill)

    #------------------------------------------------------------------
    # Write gapfill attribute
    #------------------------------------------------------------------
    def write_fill_mode(self, attr):
        data = []
        attr.get_write_value(data)
        gapfill = _getDictValue(self.__FillMode,data[0])
        communication = _PilatusIterface.communication()
        communication.set_gapfill(gapfill)

    #------------------------------------------------------------------
    # Read camstatus attribute
    #------------------------------------------------------------------
    def read_cam_state(self, attr):
        # Map the numeric camserver status code to a readable state name.
        StateDict = {0:"ERROR",1:"DISCONNECTED",2:"OK",3:"SETTING_THRESHOLD",4:"SETTING_EXPOSURE",5:"SETTING_NB_IMAGE_IN_SEQUENCE",6:"SETTING_EXPOSURE_PERIOD",7:"SETTING_HARDWARE_TRIGGER_DELAY",8:"SETTING_EXPOSURE_PER_FRAME",9:"KILL_ACQUISITION",10:"RUNNING"}
        communication = _PilatusIterface.communication()
        status = communication.status()
        attr.set_value(StateDict[status])
#==================================================================
#
# Pilatus command methods
#
#==================================================================
#==================================================================
#
# PilatusClass class definition
#
#==================================================================
class PilatusClass(PyTango.DeviceClass):
    """Tango DeviceClass declaring the Pilatus device's properties,
    commands and attributes."""

    # Class Properties
    class_property_list = {
        }

    # Device Properties
    device_property_list = {
        'TmpfsSize' :
        [PyTango.DevInt,
         "Size of communication temp. filesystem (in MB)",0],
        }

    # Command definitions
    cmd_list = {
        'getAttrStringValueList':
        [[PyTango.DevString, "Attribute name"],
         [PyTango.DevVarStringArray, "Authorized String value list"]],
        'sendCamserverCmd':
        [[PyTango.DevString, "Camserver command to send"],
         [PyTango.DevVoid, "No answer"]],
        }

    # Attribute definitions
    attr_list = {
        'threshold_gain':
        [[PyTango.DevString,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'threshold':
        [[PyTango.DevFloat,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'energy_threshold':
        [[PyTango.DevFloat,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'fill_mode':
        [[PyTango.DevString,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'trigger_delay':
        [[PyTango.DevDouble,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'nb_exposure_per_frame':
        [[PyTango.DevLong,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'nb_first_image':
        [[PyTango.DevLong,
          PyTango.SCALAR,
          PyTango.READ_WRITE]],
        'cam_state':
        [[PyTango.DevString,
          PyTango.SCALAR,
          PyTango.READ]],
        }

    #------------------------------------------------------------------
    # PilatusClass Constructor
    #------------------------------------------------------------------
    def __init__(self, name):
        PyTango.DeviceClass.__init__(self, name)
        self.set_type(name)
def _getDictKey(dict, value):
try:
ind = dict.values().index(value)
except ValueError:
return None
return dict.keys()[ind]
def _getDictValue(dict, key):
try:
value = dict[key.upper()]
except KeyError:
return None
return value
#----------------------------------------------------------------------------
# Plugins
#----------------------------------------------------------------------------
from Lima.Pilatus import Interface
# Module-level singleton: the one Pilatus hardware interface, created lazily
# by get_control() and torn down by close_interface().
_PilatusIterface = None
def get_control(**keys) :
    """Return a Lima CtControl bound to the Pilatus hardware interface.

    Instantiates the Interface singleton on first call, then links the
    control's saving object to it (extra keyword args are ignored).
    """
    global _PilatusIterface
    if _PilatusIterface is None:
        _PilatusIterface = Interface.Interface()
    ct = Core.CtControl(_PilatusIterface)
    _PilatusIterface.setCtSavingLink(ct.saving())
    return ct
def close_interface() :
    """Shut down and forget the hardware interface singleton (idempotent)."""
    global _PilatusIterface
    if _PilatusIterface is not None:
        _PilatusIterface.quit()
        _PilatusIterface = None
def get_tango_specific_class_n_device() :
    """Entry point for the LimaCCDs server: (DeviceClass, Device) pair."""
    return PilatusClass,Pilatus
|
UTF-8
|
Python
| false | false | 2,012 |
4,020,089,430,708 |
78657d0e58e928482d1faf449a134747150dab74
|
318ef479de961c7e86239901d57f2952299ef518
|
/traceback.py
|
bf272f74e9dfd11b53b1e6612657b8b38f0f866c
|
[
"GPL-2.0-only"
] |
non_permissive
|
scutLaoYi/software_architect
|
https://github.com/scutLaoYi/software_architect
|
e963adcf32bb219226dc9c540f5286d51c3104ad
|
50584d8c66c3829cdaa380cb14fc7529b29c712f
|
refs/heads/master
| 2021-01-02T22:45:39.110231 | 2013-11-29T12:25:34 | 2013-11-29T12:25:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/var/bin/python
# -*- coding:cp936 -*-
#Filename : returnStyle.py
import os
import sys
import checkOk
import timeit
solutionCount = [0,]
def search(level, totalLevel, gameMap):
    u"""Backtracking N-queens search (docstring translated from Chinese).

    Scans every column of the current row; counts complete solutions on
    the last row, otherwise recurses into the next row for each legal
    placement.  gameMap[row] holds the column chosen for that row.
    """
    global solutionCount
    if level == totalLevel:
        # Last row: every legal position found here is a complete solution.
        for i in range(1, totalLevel + 1):
            gameMap[level] = i
            if checkOk.isOk(gameMap, level, gameMap[level]):
                solutionCount[0] += 1
        return
    else:
        for i in range(1, totalLevel + 1):
            # Not the last row: recurse downward from each legal placement.
            gameMap[level] = i
            if checkOk.isOk(gameMap, level, gameMap[level]):
                search(level + 1, totalLevel, gameMap)
if __name__ == '__main__':
    u"""
    软件架构实验:N皇后问题--回溯法
    time: 2013-10-31 version:0.1b1
    creator: scutLaoYi
    language: Python 2.7
    """
    # (Header above: "Software architecture lab: N-queens, backtracking
    #  style"; kept verbatim as it is a string literal in the program.)
    # argv[1]: board size N; argv[2]: number of timing repetitions.
    totalLevel = int(sys.argv[1])
    totalTimes = int(sys.argv[2])
    print u"N皇后问题,回溯风格,N =", totalLevel
    # timeit runs the snippet totalTimes times; the snippet resets the board
    # and counter each run, so solutionCount holds one run's total afterwards.
    time = timeit.timeit(
"""
gameMap = range(0, totalLevel+1)
solutionCount[0] = 0
search(level = 1, totalLevel = totalLevel, gameMap = gameMap)
""",
        setup = 'from __main__ import search, totalLevel, solutionCount',
        number = totalTimes)
    print u'测试次数:', totalTimes       # number of timed runs
    print u'总用时:', time               # total elapsed time
    print u'平均用时:',time / totalTimes # mean time per run
    print u"解法总数 =",solutionCount[0]  # total number of solutions
|
GB18030
|
Python
| false | false | 2,013 |
19,000,935,329,219 |
cb71af68240664c6d89997e463e160a3cdb12c72
|
b5b7b47c7fa552881aafc591dfd1b97e6914f724
|
/analysis/constants.py
|
c2c539d48f3d83e910108afc1243a494048d6615
|
[] |
no_license
|
t1mur/odysseus
|
https://github.com/t1mur/odysseus
|
e8f3f4648b18bb42ba0be1a38b30c330efee69b7
|
4b8f144315b66dd5a5d834f6373d2f0a5652004b
|
refs/heads/master
| 2020-05-20T11:05:00.000835 | 2013-07-15T19:19:25 | 2013-07-15T19:19:25 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np

# Physical constants, SI units (CODATA-era values).
h = 6.62606896e-34       # Planck constant [J s]
hbar = h/(2*np.pi)       # reduced Planck constant [J s]
kb = 1.3806504e-23       # Boltzmann's constant [J/K]
a0 = 0.52917720859e-10   # Bohr radius [m]
mu_B = 9.27400949e-24    # Bohr magneton [J/T]
mp = 1.672621637e-27     # proton mass [kg]
me = 9.10938215e-31      # electron mass [kg]
c0 = 299792458           # speed of light in vacuum [m/s]
g = 9.80665              # constant of gravity [m/s^2]
mu0 = 4*np.pi*1e-7       # magnetic constant [N/A^2]
eps0 = 8.854187817e-12   # electric constant [F/m]
e0 = 1.602176487e-19     # electron (elementary) charge [C]
|
UTF-8
|
Python
| false | false | 2,013 |
4,166,118,303,221 |
6886524abdd1e3b70c25675bd3ef69d8894f7838
|
680185d233bdc0a1b2f404923d69e1d2e5b94d9d
|
/rambler/net/url_protocols/URLMockHTTPProtocol.py
|
32b926d075890c77a8d6d4678bb26b10370ccc4e
|
[] |
no_license
|
pombredanne/rambler.net
|
https://github.com/pombredanne/rambler.net
|
e0c5d36d3495d85fa07edbaa2c52c6ce69c2ae70
|
065d5ec4d1eee086b0b37910e3a6887ae748d83e
|
refs/heads/master
| 2020-12-31T02:49:31.666102 | 2012-04-12T18:24:58 | 2012-04-12T18:24:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from Rambler import component, nil
from rambler.net.controllers.URLProtocol import URLProtocol
from rambler.net.URLResponse import URLResponse
class URLMockHTTProtocol(URLProtocol):
    """Implements a protocol for urls in the form of mock-http://<some url path>/

    This component is used for testing other components that normally
    operate on HTTP urls.  Simply substitute a http:// url for mock-http://...

    NOTE(review): the class name drops a 'P' (HTTProtocol) while the module
    is URLMockHTTPProtocol — confirm before renaming, since component
    registration may depend on the current spelling.
    """

    # Injected by the test harness; nil until configured.
    test = nil
    # Q. Why am i using a class variable for this?
    # Running total of request-body bytes consumed; reset in startLoading().
    count = 0

    @classmethod
    def canInitWithRequest(cls, request):
        # Claim only the mock-http scheme (implicitly returns None otherwise).
        if request.url.scheme == 'mock-http':
            return True

    def startLoading(self):
        self.__class__.count = 0
        request = self.request
        #self.test.assertEquals(1024, self.request.HTTPHeaders['content-length'])
        # Observe the body stream and start draining it in 100-byte chunks.
        request.HTTPBodyStream.observer = self
        request.HTTPBodyStream.read(100)

    def stopLoading(self):
        pass

    def end_of_data_for(self, stream):
        # Body fully consumed: respond with a plain-text payload holding the
        # accumulated byte count, then signal the end of the load.
        stream.delegate = None
        bytes = str(self.__class__.count)
        response = URLResponse(self.request.url, 'text/plain', len(bytes), '?')
        self.client.didReceiveResponse(self, response, self.client.URLCacheStorageNotAllowed)
        self.client.didLoadData(self, bytes)
        self.client.didFinishLoading(self)
        self.test.quit()
        stream.close()

    def onRead(self, stream, data):
        """Called when bytes from the stream are available; add them to the count."""
        self.__class__.count += len(data)
        # keep reading until close
        stream.read(100)
|
UTF-8
|
Python
| false | false | 2,012 |
11,012,296,155,941 |
4f49aeea6e842ebc8c3c2e71c437b982a5a6755e
|
d4cd933b403fc7196d372d46d0d7a9af831c7597
|
/nmhooks/tests/conftest.py
|
87a08ecbba8f5134e03b25750d350fb4714333c0
|
[
"GPL-1.0-or-later"
] |
non_permissive
|
romanofski/nmhooks
|
https://github.com/romanofski/nmhooks
|
06c488d82eeff3bbdb43bc5d28fdf3281408cd8b
|
1dac3450dd413520601d301ec7403099835acfcf
|
refs/heads/master
| 2016-09-06T17:34:14.474050 | 2014-07-28T23:33:16 | 2014-07-28T23:33:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pytest
import notmuch
import shutil
import os
@pytest.fixture
def db(tmpdir):
    """
    Build a throwaway notmuch database for a test.

    Copies the sample maildir shipped next to this file (tests/mails) into
    *tmpdir*, creates a fresh notmuch database there, and indexes every
    message found under cur/.

    NOTE(review): shutil.copytree assumes every entry in mails/ is a
    directory (cur/, new/, tmp/); a plain file there would make it fail.
    """
    src = os.path.join(os.path.dirname(__file__), 'mails')
    for item in os.listdir(src):
        shutil.copytree(os.path.join(src, item),
                        os.path.join(str(tmpdir), item))
    db = notmuch.Database(str(tmpdir), create=True)
    for mail in os.listdir(str(tmpdir.join('cur'))):
        db.add_message(os.path.join(str(tmpdir.join('cur')), mail))
    return db
|
UTF-8
|
Python
| false | false | 2,014 |
8,383,776,202,808 |
36e011ba59f7ccd42ac9ba92858a2cb36004c1f5
|
153ecce57c94724d2fb16712c216fb15adef0bc4
|
/book/trunk/smileyutility/localtheme.py
|
4014acf56d8b7d555d41553be53b781b972686f1
|
[] |
no_license
|
pombredanne/zope
|
https://github.com/pombredanne/zope
|
10572830ba01cbfbad08b4e31451acc9c0653b39
|
c53f5dc4321d5a392ede428ed8d4ecf090aab8d2
|
refs/heads/master
| 2018-03-12T10:53:50.618672 | 2012-11-20T21:47:22 | 2012-11-20T21:47:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Local Smiley Theme Implementation
$Id$
"""
__docformat__ = 'restructuredtext'
from zope.component.exceptions import ComponentLookupError
from zope.interface import implements
from zope.app import zapi
from zope.app.container.btree import BTreeContainer
from zope.app.component.localservice import queryNextService
from zope.app.file.image import Image
from interfaces import ISmileyTheme, ISmiley, ILocalSmileyTheme
class Smiley(Image):
    """An image content object marked as a smiley via ISmiley."""
    implements(ISmiley)
class SmileyTheme(BTreeContainer):
"""A local smiley theme implementation.
>>> from zope.app.tests import setup
>>> from zope.app.utility.utility import LocalUtilityService
>>> site = setup.placefulSetUp()
>>> rootFolder = setup.buildSampleFolderTree()
Setup a simple function to add local smileys to a theme.
>>> import os
>>> import book.smileyutility
>>> def addSmiley(theme, text, filename):
... base_dir = os.path.dirname(book.smileyutility.__file__)
... filename = os.path.join(base_dir, filename)
... theme[text] = Smiley(open(filename, 'r'))
Create components in root folder
>>> site = setup.createServiceManager(rootFolder)
>>> utils = setup.addService(site, zapi.servicenames.Utilities,
... LocalUtilityService())
>>> theme = setup.addUtility(site, 'plain', ISmileyTheme, SmileyTheme())
>>> addSmiley(theme, ':)', 'smileys/plain/smile.png')
>>> addSmiley(theme, ':(', 'smileys/plain/sad.png')
Create components in `folder1`
>>> site = setup.createServiceManager(rootFolder['folder1'])
>>> utils = setup.addService(site, zapi.servicenames.Utilities,
... LocalUtilityService())
>>> theme = setup.addUtility(site, 'plain', ISmileyTheme, SmileyTheme())
>>> addSmiley(theme, ':)', 'smileys/plain/biggrin.png')
>>> addSmiley(theme, '8)', 'smileys/plain/cool.png')
Now test the single smiley accessor methods
>>> from zope.publisher.browser import TestRequest
>>> from zope.app.component.localservice import setSite
>>> from book.smileyutility import getSmiley, querySmiley
>>> setSite(rootFolder)
>>> getSmiley(':)', TestRequest(), 'plain')
'http://127.0.0.1/++etc++site/default/plain/%3A%29'
>>> getSmiley(':(', TestRequest(), 'plain')
'http://127.0.0.1/++etc++site/default/plain/%3A%28'
>>> getSmiley('8)', TestRequest(), 'plain')
Traceback (most recent call last):
...
ComponentLookupError: 'Smiley not found.'
>>> querySmiley('8)', TestRequest(), 'plain', 'nothing')
'nothing'
>>> setSite(rootFolder['folder1'])
>>> getSmiley(':)', TestRequest(), 'plain')
'http://127.0.0.1/folder1/++etc++site/default/plain/%3A%29'
>>> getSmiley(':(', TestRequest(), 'plain')
'http://127.0.0.1/++etc++site/default/plain/%3A%28'
>>> getSmiley('8)', TestRequest(), 'plain')
'http://127.0.0.1/folder1/++etc++site/default/plain/8%29'
>>> getSmiley(':|', TestRequest(), 'plain')
Traceback (most recent call last):
...
ComponentLookupError: 'Smiley not found.'
>>> querySmiley(':|', TestRequest(), 'plain', 'nothing')
'nothing'
Let's now test the `getSmileysMapping()` method. To do that we create a
small helper method that helps us compare dictionaries.
>>> from pprint import pprint
>>> from book.smileyutility import getSmileysMapping
>>> def output(dict):
... items = dict.items()
... items.sort()
... pprint(items)
>>> setSite(rootFolder)
>>> output(getSmileysMapping(TestRequest(), 'plain'))
[(u':(', 'http://127.0.0.1/++etc++site/default/plain/%3A%28'),
(u':)', 'http://127.0.0.1/++etc++site/default/plain/%3A%29')]
>>> setSite(rootFolder['folder1'])
>>> output(getSmileysMapping(TestRequest(), 'plain'))
[(u'8)', 'http://127.0.0.1/folder1/++etc++site/default/plain/8%29'),
(u':(', 'http://127.0.0.1/++etc++site/default/plain/%3A%28'),
(u':)', 'http://127.0.0.1/folder1/++etc++site/default/plain/%3A%29')]
>>> getSmileysMapping(TestRequest(), 'foobar')
Traceback (most recent call last):
...
ComponentLookupError: \
(<InterfaceClass book.smileyutility.interfaces.ISmileyTheme>, 'foobar')
>>> setup.placefulTearDown()
"""
implements(ILocalSmileyTheme)
def getSmiley(self, text, request):
"See book.smileyutility.interfaces.ISmileyTheme"
smiley = self.querySmiley(text, request)
if smiley is None:
raise ComponentLookupError, 'Smiley not found.'
return smiley
def querySmiley(self, text, request, default=None):
"See book.smileyutility.interfaces.ISmileyTheme"
if text not in self:
theme = queryNextTheme(self, zapi.name(self))
if theme is None:
return default
else:
return theme.querySmiley(text, request, default)
return getURL(self[text], request)
def getSmileysMapping(self, request):
"See book.smileyutility.interfaces.ISmileyTheme"
theme = queryNextTheme(self, zapi.name(self))
if theme is None:
smileys = {}
else:
smileys = theme.getSmileysMapping(request)
for name, smiley in self.items():
smileys[name] = getURL(smiley, request)
return smileys
def queryNextTheme(context, name, default=None):
    """Return the theme registered under *name* at the next site above
    *context*, walking the utility-service chain upwards.

    Returns *default* when no higher site provides a theme of that name.
    """
    found = default
    while found is default:
        utilities = queryNextService(context, zapi.servicenames.Utilities)
        if utilities is None:
            # Ran off the top of the site hierarchy without a match.
            return default
        found = utilities.queryUtility(ISmileyTheme, name, default)
        # Continue the search from the service we just inspected.
        context = utilities
    return found
def getURL(smiley, request):
    """Return the absolute URL of *smiley* for the given request."""
    view = zapi.getView(smiley, 'absolute_url', request=request)
    return view()
|
UTF-8
|
Python
| false | false | 2,012 |
17,016,660,448,226 |
d6527eea730809e0e3f99952195d998ed480962e
|
69cf3170ad356ba682ccb0d8d789fa6bd59af368
|
/cloudlb/errors.py
|
876d7ba124ee948a1edd9da3a5dc9736d5c4106d
|
[
"MIT"
] |
permissive
|
rackerlabs/python-cloudlb
|
https://github.com/rackerlabs/python-cloudlb
|
cce5a3279268247b63df43d22738050e2c647512
|
56874e7009235e52152fa1eac16ef98a553f792c
|
refs/heads/master
| 2023-08-29T02:44:47.344268 | 2014-06-05T20:22:05 | 2014-06-05T20:22:05 | 1,411,706 | 3 | 0 | null | false | 2014-06-05T19:57:45 | 2011-02-25T17:16:28 | 2014-06-05T19:57:44 | 2014-06-05T19:57:45 | 372 | 41 | 23 | 8 |
Python
| null | null |
# -*- encoding: utf-8 -*-
__author__ = "Chmouel Boudjnah <chmouel@chmouel.com>"
import cloudlb.consts
class CloudlbException(Exception): pass  # Root of the cloudlb exception hierarchy.
class ResponseError(CloudlbException):
    """
    Raised when the remote service returns an error.
    """

    def __init__(self, status, reason):
        self.status = status
        self.reason = reason
        Exception.__init__(self)

    def _describe(self):
        # Shared formatting for __str__ and __repr__.
        return '%d: %s' % (self.status, self.reason)

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
class RateLimit(ResponseError):
    """
    Raised when too many requests have been made
    of the remote service in a given time period.
    """
    status = 413  # class-level HTTP status; __str__ reads it via self.status

    def __init__(self, wait):
        # Seconds the caller should wait before retrying.
        self.wait = wait
        self.reason = "Account is currently above limit, please wait %s seconds." % (wait)
        Exception.__init__(self)
class AbsoluteLimit(ResponseError):
    """
    Raised when an absolute limit is reached. Absolute limits include the
    number of load balancers in a region, the number of nodes behind a load
    balancer.
    """
    status = 413  # class-level HTTP status; __str__ reads it via self.status

    def __init__(self, reason):
        self.reason = reason
        Exception.__init__(self)
class BadRequest(ResponseError):
    """
    Raised when the request doesn't match what was anticipated.
    """
    pass
# Immutable and Unprocessable Entity are both 422 errors, but have slightly different meanings
class ImmutableEntity(ResponseError):
    # 422 variant: the entity presumably cannot be modified right now -- verify against API docs.
    pass
class UnprocessableEntity(ResponseError):
    # 422 variant: the request was understood but could not be processed.
    pass
class InvalidRegion(CloudlbException):
    """
    Raised when the region specified is invalid
    """
    # All accepted spellings: region codes plus their long names.
    regions = cloudlb.consts.REGION.values() + cloudlb.consts.REGION.keys()

    def __init__(self, region):
        self.region = region
        Exception.__init__(self)

    def _describe(self):
        # Shared formatting for __str__ and __repr__.
        return 'Region %s not in active region list: %s' % (
            self.region, ', '.join(self.regions))

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
class InvalidProtocol(CloudlbException):
    """
    Raised when the protocol specified is invalid
    """
    # NOTE(review): derives from CloudlbException directly (not ResponseError),
    # so it carries no HTTP status/reason -- presumably raised client-side.
    pass
class AuthenticationFailed(ResponseError):
    """
    Raised on a failure to authenticate.
    """
    pass
class NotFound(ResponseError):
    """
    Raised when the object wasn't found.
    """
    pass
class InvalidLoadBalancerName(CloudlbException):
    """Raised when a load balancer name fails validation."""

    def __init__(self, reason):
        self.reason = reason
        Exception.__init__(self)

    def _describe(self):
        # Shared formatting for __str__ and __repr__.
        return '%s' % (self.reason)

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
|
UTF-8
|
Python
| false | false | 2,014 |
3,032,246,955,719 |
802cc367452a049d992240b00af4f1cdefefe527
|
940d7b4949892977804f24aa26a65fe47c824658
|
/rosalind/revc.py
|
22cb999d4b15558376b6a45e56eb082ad6f4fb0b
|
[] |
no_license
|
anaximander/rosalind-solutions
|
https://github.com/anaximander/rosalind-solutions
|
a1b6bb5d93d4eb8c4e5b0c8c807caf1b6931f9c2
|
18a3aeec91376b3dc9a83984226856e259c0651a
|
refs/heads/master
| 2016-08-07T02:31:04.283620 | 2013-10-19T20:50:11 | 2013-10-19T20:50:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from constants import BP_COMPLEMENTS
def get_reverse_complement(genetic_string):
reverse_bps = "".join(list(reversed(genetic_string)))
complement = "".join([BP_COMPLEMENTS[item] for item in reverse_bps])
print complement + "\n"
return complement
|
UTF-8
|
Python
| false | false | 2,013 |
7,559,142,451,619 |
4692babcc15fc217ac739b39c2f40c6e5e305e8c
|
499f9e2f9367fe0c8a8bc4612d63ca7da59413bb
|
/ng/hack/hack_page2.py
|
a799159975ed726ffbba17d472a19e6516ca30bd
|
[] |
no_license
|
peterrenshaw/ng
|
https://github.com/peterrenshaw/ng
|
b261c7ec35eb0fb8bc9674248d6cfe0db64e7443
|
72d1bdb39a4d6fe247f2e4f936c5e76d3950f729
|
refs/heads/master
| 2020-06-05T02:26:28.337346 | 2013-11-08T03:19:07 | 2013-11-08T03:19:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# ~*~ encoding: utf-8 ~*~
import os
import time
import datetime
from string import Template
# --- time tools ---
#---
# dt_ymdhm2_epoch: pass in y/m/d/h:m spits out epoch
#---
def dt_ymdhm2_epoch(year, month, day, hour, minute):
    """Return the epoch timestamp (local time) for the given Y/M/D H:M.

    TODO: add optional seconds.
    """
    moment = datetime.datetime(year, month, day, hour, minute)
    return time.mktime(moment.timetuple())
#---
# dt_epoch_utc: epoch, utc
#---
def dt_epoch_utc():
    """Return the current time as seconds since the Unix epoch (UTC).

    BUG FIX: the previous implementation fed ``utcnow()``'s timetuple to
    ``time.mktime``, which interprets its argument as *local* time, so the
    result was shifted by the machine's UTC offset.  ``calendar.timegm``
    is the correct inverse of ``gmtime`` and yields the true UTC epoch.
    """
    import calendar  # local import keeps the module's import block untouched
    t = datetime.datetime.utcnow()
    return calendar.timegm(t.timetuple())
#---
# dt_datetime_strf: format return using strf strings
#---
def dt_datetime_strf(strf_format, is_upper=False):
    """Return the current local datetime rendered with *strf_format*.

    When *is_upper* is true the rendered string is upper-cased
    (handy for e.g. 'Oct' -> 'OCT').
    """
    rendered = datetime.datetime.now().strftime(strf_format)
    return rendered.upper() if is_upper else rendered
#
# --- end time tools ---
# ---- Container object ---
#
#===
# name: Container
# date: 2013OCT24
# prog: pr
# desc: using only list and dictionary, sort the list of
# dictionary values by key. The object allows you to
# add multiple dictionaries of data (test) then sort by
# existing key. Add a dictionary, every dictionary after
# this must have the same keys, this is enforced on add.
#
# funky :)
#===
class Container:
    """Sortable collection of uniform dicts, built from lists and dicts only.

    The keys of the first record added fix the schema; every later ``add``
    must use exactly those keys (enforced in ``valid``).  Records can then
    be sorted by any schema key.
    """

    def __init__(self):
        # Master list of record dicts, in insertion order.
        self.index = []

    def add(self, **kwargs):
        """Store one record given as key=value pairs; True on success.

        Returns False for an empty call or for any key that is not part
        of the schema established by the first record.
        """
        if not kwargs:
            return False
        record = {}
        for key, value in kwargs.items():
            if not self.valid(key):
                return False
            record[key] = value
        self.index.append(record)
        return True

    def valid(self, key):
        """True when *key* matches the first record's schema (or no records yet)."""
        if not self.index:
            return True
        return key in self.index[0]

    def sort(self, term, order=True):
        """Return a sorted copy of the records keyed on *term*, or False.

        ``order`` must be exactly True (descending) or False (ascending);
        anything else -- and an unknown *term* -- yields False.
        """
        if term in self.index[0]:
            # Identity checks on purpose: 1/0 are not accepted as order flags.
            if order is True or order is False:
                return sorted(self.all(),
                              key=lambda record: record[term],
                              reverse=order)
        return False

    def clear(self):
        """Drop every stored record."""
        self.index = []

    def all(self):
        """Return the underlying record list."""
        return self.index
#
# --- end container object ---
# --- Page object ---
#
#===
# name: Page
# date: 2013OCT21
# prog: pr
# desc: model a Page to be saved
#===
class Page:
    """Model of a single renderable web page.

    Collects body text, file-path, time, meta and image data into small
    per-category dicts (accessed uniformly through query/marshall), then
    renders header/body/footer with string.Template and writes the result
    to disk.
    """
    def __init__(self, is_index):
        """initialise the Page"""
        self.is_index = (True if is_index else False)
        self.index = [] # list of index data for rendering
        self.prod_name = "nextgen" # pass in
        self.prod_version = "0.1" # pass in
        # basic page
        self.__header = "" # template
        self.__footer = "" # template
        self.__body = "" # container for body (NOTE(review): never read; body text lives in body_data)
        # --- data types ---
        # list of dictionaries used to store below - use for checking
        self.dtypes = ['body','file','header','image','index','meta','time']
        # body
        self.body_data = dict(title="", # page title
                              abstract="", # summary, <140 char
                              body="", # entire text
                              template="") # template for body
        # file
        self.file_data = dict(basepath="", # root path
                              relpath="", # path relative to base
                              name="", # filename
                              ext="", # filename extension
                              fullpath="") # full filepath and filename.ext
        # time
        self.time_data = dict(dt_epoch="", # epoch
                              year="", # year YYYY
                              month="", # mmm or mm
                              mmm="", # mmm == JAN, FEB, etc
                              mm="", # mm == 01, 02, etc, zero pad
                              day="", # 01, 02, etc zero pad
                              hour="", # hour in HH format, zero pad
                              minute="", # minute in MM fomat, zero pad
                              dt_strf="") # string formatted date time
        # meta
        self.meta_data = dict(author="", # author name
                              site_name="", # name of site
                              site_byline="", # site tag line
                              prod_name="", # name of product
                              prod_version="", # product version no
                              tags=[], # tags per Page
                              is_index=self.is_index) # is this an index?
        # image
        self.image_data = dict(img_url="", # url to image for linking
                               img_src="", # source url to image display
                               img_height="", # image attribute
                               img_width="") # image attribute
        #
        # --- end data types ---
        # --- mapping ---
        # header
        self.header_map = dict(author="",
                               site="",
                               site_byline="",
                               title="",
                               abstract="",
                               year="",
                               mmm="",
                               mm="",
                               day="",
                               tool="",
                               version="")
        # body
        self.body_map = dict(title="",
                             site="",
                             abstract="",
                             description="",
                             body="",
                             year="",
                             mmm="",
                             mm="",
                             day="",
                             img_url="",
                             img_src="",
                             img_height="",
                             img_width="",
                             dt_strf="")
        # index
        self.index_map = dict(title="",
                              abstract="",
                              file_path="",
                              year="",
                              mmm="",
                              day="",
                              hour="",
                              minute="",
                              dt_strf="",
                              dt_epoch="")
        # --- end mapping ---
    # --- collect data ---
    def header(self, content):
        """content for Page header"""
        if content:
            self.__header = content
            return True
        return False
    def body(self, title, abstract, content, template):
        """content for Page body"""
        # All four pieces must be non-empty for the page body to be stored.
        if title:
            if abstract:
                if content:
                    if template:
                        title = self.q_body('title', data=title, is_set=True)
                        abstract = self.q_body('abstract', data=abstract, is_set=True)
                        body = self.q_body('body', data=content, is_set=True)
                        template = self.q_body('template', data=template, is_set=True)
                        return (title and abstract and body and template)
        return False
    def footer(self, content):
        """footer content, set or F"""
        if content:
            self.__footer = content
            return True
        return False
    def filedata(self, basepath, name, ext, relpath=""):
        """
        build page filename from paths, name & ext. remember
        that 'relpath' is optional so deal with this.
        """
        if os.path.isdir(basepath):
            if ext in ['html','htm','txt']:
                if name:
                    base = self.q_file('basepath', data=basepath, is_set=True)
                    name = self.q_file('name', data=name, is_set=True)
                    ext = self.q_file('ext', data=ext, is_set=True)
                    if relpath: # optional
                        rel = self.q_file('relpath', data=relpath, is_set=True)
                        status = (base and rel and name and ext)
                    else:
                        status = (base and name and ext)
                    if status:
                        # derive and cache the full output path as well
                        self.filepath()
                        return True
                    else:
                        return False
        return False
    def filepath(self):
        """
        build full filepath with filename and extension
        remember! this is building web server filepath but
        it could also be used on win32 so use 'os.path.join'
        also remember 'optional' relpath assume 'name'
        and 'ext' valid.
        """
        if self.q_file('basepath'):
            if self.q_file('name') and self.q_file('ext'):
                fn = "%s.%s" % (self.q_file('name'), self.q_file('ext'))
                if self.q_file('relpath'):
                    fullpath = os.path.join(self.q_file('basepath'),
                                            self.q_file('relpath'), fn)
                else:
                    fullpath = os.path.join(self.q_file('basepath'), fn)
                return self.q_file('fullpath', data=fullpath, is_set=True)
        return False
    def imagedata(self, source, url, height=375, width=500):
        """collect image data for main image, default HxW"""
        if source:
            if url:
                source = self.q_image(key='img_src', data=source, is_set=True)
                url = self.q_image(key='img_url', data=url, is_set=True)
                height = self.q_image(key='img_height', data=height, is_set=True)
                width = self.q_image(key='img_width', data=width, is_set=True)
                return (source and url and height and width)
        return False
    def metadata(self, **kwargs):
        """
        lots of metadata available, pass in as
        keyword arguments foo="bar", foobar="", bar=foo
        test input keys against meta_data dict, return
        true, assign data to key if not empty
        """
        # Unknown keys are silently ignored; only a failed store returns False.
        for arg in kwargs:
            if arg in self.meta_data.keys():
                status = self.q_meta(key=arg, data=kwargs[arg], is_set=True)
                if not status:
                    return False
        return True
    def timedata(self, **kwargs):
        """
        like metadata there's a lot of time data of which epoch is the
        quickest and easiest to input and derive other time info. Only
        use UTC for epoch unless you know exactly where you are. The rest
        can be found in self.time_data
        """
        for arg in kwargs:
            if arg in self.time_data.keys():
                status = self.q_time(key=arg, data=kwargs[arg], is_set=True)
                if not status:
                    return False
        return True
    # --- end collect ---
    # --- get data ---
    #
    def query(self, key, store, data, is_set):
        """return or update dictionary data by type or F"""
        if key in store: # can we update by key?
            if is_set: # do we want to update?
                if data:
                    store[key] = data # update
                    return True
                else:
                    # refuse to store an empty value
                    return False
            else:
                return store[key] # get query
        else:
            return False
    def marshall(self, dtype, key, data="", is_set=False):
        """determine which store, query and return or update data by type or F"""
        result = False
        if dtype in self.dtypes:
            if dtype == 'body':
                if key in self.body_data:
                    result = self.query(key, self.body_data, data, is_set)
            elif dtype == 'file':
                if key in self.file_data:
                    result = self.query(key, self.file_data, data, is_set)
            elif dtype == 'image':
                if key in self.image_data:
                    result = self.query(key, self.image_data, data, is_set)
            elif dtype == 'time':
                if key in self.time_data:
                    result = self.query(key, self.time_data, data, is_set)
            elif dtype == 'meta':
                if key in self.meta_data:
                    result = self.query(key, self.meta_data, data, is_set)
            else:
                # NOTE(review): 'header' and 'index' appear in dtypes but have
                # no backing store here, so they fall through to False.
                pass
        return result
    #---
    # convenience methods for marshall->query
    #---
    def q_meta(self, key, data="", is_set=False):
        return self.marshall('meta', key, data, is_set)
    def q_file(self, key, data="", is_set=False):
        return self.marshall('file', key, data, is_set)
    def q_body(self, key, data="", is_set=False):
        return self.marshall('body', key, data, is_set)
    def q_image(self, key, data="", is_set=False):
        return self.marshall('image', key, data, is_set)
    def q_time(self, key, data="", is_set=False):
        return self.marshall('time', key, data, is_set)
    # --- end get data ---
    # --- render ---
    #
    # template
    def build_template(self, template, data):
        """
        given template and data, return
        substituted string with data
        """
        if template: # has template?
            if data: # has data?
                data_rendered = []
                data_raw = template.split('\n')
                for line in data_raw:
                    t = Template(line)
                    # NOTE(review): Template.substitute raises KeyError on a
                    # missing placeholder; safe_substitute would be lenient.
                    render = str(t.substitute(data))
                    data_rendered.append(render)
                return data_rendered
        return False
    # --- render ---
    # page
    def render_header(self):
        """given header data and template, substitute data for placeholders"""
        # remember: the dict keys are related to <header.html>
        # key holders in the template
        header_map = dict(author=self.q_meta('author'),
                          site=self.q_meta('site_name'),
                          site_byline=self.q_meta('site_byline'),
                          title=self.q_body('title'),
                          abstract=self.q_body('abstract'),
                          year=self.q_time('year'),
                          mmm=self.q_time('mmm'),
                          mm=self.q_time('mm'),
                          day=self.q_time('day'),
                          tool=self.q_meta('tool'),
                          version=self.q_meta('version'))
        # NOTE(review): meta_data has no 'tool'/'version' keys, so those two
        # lookups yield False -- probably 'prod_name'/'prod_version' were meant.
        header = self.build_template(self.__header, header_map)
        return header
    def render_body(self):
        """given data and template, substitute data for placeholders"""
        # remember: the dict keys are related to <content.html>
        # key holders in the template
        body_map = dict(title=self.q_body('title'),
                        site_name=self.q_meta('site_name'),
                        abstract=self.q_body('abstract'),
                        description=self.q_body('description'),
                        body=self.q_body('body'),
                        year=self.q_time('year'),
                        mmm=self.q_time('mmm'),
                        mm=self.q_time('mm'),
                        day=self.q_time('day'),
                        img_url=self.q_image('img_url'),
                        img_src=self.q_image('img_src'),
                        img_height=self.q_image('img_height'),
                        img_width=self.q_image('img_width'),
                        dt_strf=self.q_time('dt_strf'))
        # NOTE(review): body_data has no 'description' key, so that lookup
        # always yields False.
        # grab the template, pass in the map, spit out the final body content
        body = self.build_template(self.q_body('template'), body_map)
        return body
    def render_footer(self):
        """Return the raw footer text; no placeholder substitution is done."""
        footer = "%s" % self.__footer
        return footer
    # all
    def render(self):
        """Render header, body and footer and write them to the page file.

        Returns True on success, False on any failure.
        """
        header = self.render_header()
        body = self.render_body()
        footer = self.render_footer()
        try:
            with open(self.q_file('fullpath'),'wt') as f:
                # header
                for line in header:
                    f.write(line)
                    f.write("\n")
                # body
                for line in body:
                    f.write(line)
                    f.write("\n")
                # footer
                if footer:
                    f.write(footer)
                f.close()  # redundant: `with` already closes the file
        except:
            # NOTE(review): bare except silently swallows the real error
            # (including a False return from the render_* calls above);
            # narrowing it would aid debugging.
            header = ""
            body = ""
            footer = ""
            return False
        else:
            return True
# --- end render
#---
# main:
#---
def main():
    """Build one page: read partial templates and page text from ./source,
    render them through Page, and write the result to the blog directory."""
    destination = 'E:\\blog\\seldomlogical'  # NOTE(review): hard-coded Windows output dir
    # input variables
    author = "Peter Renshaw"
    site = "Seldomlogical"
    site_byline = "new ideas, ideal solutions are seldom logical. attaining a desired goal always is"
    title = "Hello world"
    abstract = "A quick hello world hack. Not much too look at but you have to start somewhere."
    img_url = "http://www.flickr.com/photos/bootload/7419372302/"
    img_src = "http://farm9.staticflickr.com/8154/7419372302_f34e56a94c.jpg"
    img_height = "375"
    img_width = "500"
    # "Thursday, 19 July 2012 09:41"
    str_full = "%A, %d %B %Y %H:%M"
    dt_full = dt_datetime_strf(str_full)
    str_24hour = dt_datetime_strf("%H")
    str_minute = dt_datetime_strf("%M")
    dt_epoch = dt_epoch_utc()
    is_index=True
    # Load the three template partials plus the page text.
    header = ""
    hp = os.path.join(os.curdir, 'source', 'partials', 'header.html')
    with open(hp, encoding='utf-8') as f:
        header = f.read()
        f.close()  # redundant: `with` already closes the file
    footer = ""
    fp = os.path.join(os.curdir, 'source', 'partials', 'footer.html')
    with open(fp, encoding='utf-8') as f:
        footer = f.read()
        f.close()  # redundant
    tpl = ""
    tp = os.path.join(os.curdir, 'source', 'partials', 'content.html')
    with open(tp, encoding='utf-8') as f:
        tpl = f.read()
        f.close()  # redundant
    content = ""
    cp = os.path.join(os.curdir, 'source', 'page.txt')
    with open(cp, encoding='utf-8') as f:
        content = f.read()
        f.close()  # redundant
    print('start')
    # Feed everything into a Page and render it to disk.
    p = Page(is_index)
    p.header(header)
    p.footer(footer)
    p.imagedata(source=img_src,
                url=img_url,
                height=img_height,
                width=img_width)
    p.timedata(year="2013",
               mm="10",
               mmm="OCT",
               day="17",
               dt_strf=dt_full,
               dt_epoch=dt_epoch,
               hour=str_24hour,
               minute=str_minute)
    p.filedata(basepath=destination,name="index", ext="html")
    p.metadata(tags=['tag1','tag2'],
               author=author,
               site_name=site,
               site_byline=site_byline)
    p.body(title=title, abstract=abstract, content=content,template=tpl)
    p.render()
    print('ok')
#---
# main app entry point
#---
if __name__ == '__main__':
main()
# vim: ff=unix:ts=4:sw=4:tw=78:noai:expandtab
|
UTF-8
|
Python
| false | false | 2,013 |
18,708,877,567,565 |
450cf23decb31059ba76c078bf2dd5b168605535
|
d73fb7bb272e30fa4a2b91be9308f2ddde9d009e
|
/SimplePrograms/right_justify.py
|
6add3ea6bff143680f9f366d86d203ffe06af339
|
[] |
no_license
|
biwin/Python
|
https://github.com/biwin/Python
|
797562c488ec3effcf2c1b2e1dac932ac4aff144
|
e79a89aa62f0866fb8dfb778038bc7e49ee3b6c2
|
refs/heads/master
| 2015-08-13T05:06:27.980261 | 2014-10-02T18:27:57 | 2014-10-02T18:27:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'payload'
def right_justify(name, width=50):
    """Print *name* right-justified in a field of *width* characters.

    Generalized: the historical hard-coded field width of 50 is now the
    default of a new keyword parameter, so existing callers are unaffected.
    Strings longer than *width* are printed unchanged, exactly matching the
    old behaviour where the padding collapsed to nothing.
    """
    # str.rjust(width) is identical to ' ' * (width - len(name)) + name,
    # including the overlong-name case.
    print(name.rjust(width))
right_justify("hi dude")
|
UTF-8
|
Python
| false | false | 2,014 |
6,682,969,141,584 |
26eef23fb389cbcbe63bae3d38441ec42cf3bf32
|
5c1a26f958ccee2c16f4c0bf4489b4195ef0661b
|
/output-scripts/allTS-sanityCheck.py
|
50461cd6b176a5f164a46125e4a0378e29af4b94
|
[] |
no_license
|
jmeehan16/streambench
|
https://github.com/jmeehan16/streambench
|
c707d1f9d369fa984117b5445c6770ae3aca8baf
|
a9aad274810a39be2d78e26ff6236b0ed66cf07a
|
refs/heads/master
| 2020-04-15T01:31:15.303528 | 2013-12-19T10:14:30 | 2013-12-19T10:14:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import sys
import math
with open(sys.argv[1]) as f:
lines = f.readlines()
with open(sys.argv[2]) as f2:
output = f.readlines()
found = False
for line in lines:
for o in output:
o = o.replace("(","")
o = o.replace(")","")
ts = line.split(",")[0]
if ts == line:
found = True
break
if found == False:
print line
found = False
|
UTF-8
|
Python
| false | false | 2,013 |
4,904,852,690,479 |
b374b29f3ef88ec0815d90c9367dc1b35eb6caa7
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Vaqta_25E_susp_for_injecion_SmPC.py
|
d1ffc590cc26dad82481b507c7b856f605c10ed2
|
[] |
no_license
|
urudaro/data-ue
|
https://github.com/urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
{'_data': [['Unknown',
[['Blood',
u'Very rarely, thrombocytopenia. Post-marketing Safety Study In a post-marketing safety study, a total of 12,523 individuals 2 through 17 years of age received 1 or 2 doses of VAQTA. There was no serious, vaccine-related, adverse event identified. There was no nonserious, vaccine-related, adverse event resulting in outpatient visits.']]]],
'_pages': [5, 7],
u'_rank': 1,
u'_type': u'LSFU'}
|
UTF-8
|
Python
| false | false | 2,013 |
2,233,383,002,669 |
72b8b09d7cf61c2ab8b421984c95374eb8eadff1
|
b9c4c9812355e2844e91cdc36f5ad8e40807127f
|
/RemoteObject.py
|
b58428f7ab62f29c038adb1ad9c4ea4f45e6025f
|
[] |
no_license
|
stevenqzhang/PyPad
|
https://github.com/stevenqzhang/PyPad
|
55c0839dfb64fba7697af3863a12b1f682d621bb
|
183ca454d80d2e45b610f18c4776bfdfc250c0eb
|
refs/heads/master
| 2020-05-18T05:18:56.106475 | 2013-11-01T02:41:39 | 2013-11-01T02:41:39 | 14,033,390 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import Pyro.core, Pyro.util, Pyro.naming
import sys
import threading
import socket
import os
import signal
# change me as appropriate when you run the app!
default_ns_host = '192.168.150.1'
class MyThread(threading.Thread):
    """A ``threading.Thread`` that starts itself on construction.

    ``MyThread(target, *args)`` is shorthand for building a thread with the
    given target and positional arguments and immediately calling
    ``start()`` on it.
    """
    def __init__(self, target, *args):
        super(MyThread, self).__init__(target=target, args=args)
        self.start()
class Watcher:
    """this class solves two problems with multithreaded
    programs in Python, (1) a signal might be delivered
    to any thread (which is just a malfeature) and (2) if
    the thread that gets the signal is waiting, the signal
    is ignored (which is a bug).
    The watcher is a concurrent process (not thread) that
    waits for a signal and then kills the process that contains the
    active threads. See Appendix A of The Little Book of Semaphores.
    I have only tested this on Linux. I would expect it to
    work on OS X and not work on Windows.
    """
    def __init__(self, callback = None):
        """ Creates a child thread, which returns. The parent
        thread waits for a KeyboardInterrupt and then kills
        the child thread.
        """
        # os.fork is Unix-only: the *child* process returns to the caller
        # and runs the real program; the parent stays behind to watch.
        self.child = os.fork()
        if self.child == 0:
            return
        else:
            self.watch(callback)
    def watch(self, callback = None):
        # Parent process: block until the child exits or Ctrl-C arrives.
        # *callback* (if given) runs before the child is killed.
        try:
            os.wait()
        except KeyboardInterrupt:
            # I put the capital B in KeyBoardInterrupt so I can
            # tell when the Watcher gets the SIGINT
            if callback:
                callback()
            print 'KeyBoardInterrupt'
            self.kill()
            sys.exit()
    def kill(self):
        # Forcefully terminate the child process.
        try:
            os.kill(self.child, signal.SIGKILL)
        except OSError: pass  # child already exited
def get_ip_addr():
    """Return the real IP address of this machine.

    Works by opening a TCP connection to the name-server host and reading
    the local address of the resulting socket; no payload is exchanged.
    (Also fixes the original's misplaced docstring, which sat after the
    first statement and so was a no-op string literal.)
    """
    port = 9090
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        csock.connect((default_ns_host, port))
        (addr, port) = csock.getsockname()
    finally:
        # FIX: close the socket even when connect() raises (was leaked).
        csock.close()
    return addr
class NameServer:
    """the NameServer object represents the name server running
    on a remote host and provides methods for interacting with it"""
    def __init__(self, ns_host=default_ns_host):
        """locate the name server on the given host"""
        self.ns_host = ns_host
        self.ns = Pyro.naming.NameServerLocator().getNS(ns_host)
    def get_proxy(self, name):
        """look up a remote object by name and create a proxy for it"""
        try:
            uri = self.ns.resolve(name)
        except Pyro.errors.NamingError:
            # NOTE(review): `type` shadows the builtin; only `value` is used.
            type, value, traceback = sys.exc_info()
            print 'Pyro NamingError:', value
            sys.exit(1)
        return Pyro.core.getProxyForURI(uri)
    def query(self, name, group = None):
        """check whether the given name is registered in the given group.
        return 1 if the name is a remote object, 0 if it is a group,
        and -1 if it doesn't exist."""
        # ns.list() yields (name, kind) pairs -- kind 1 = object, 0 = group.
        t = self.ns.list(group)
        for k, v in t:
            if k == name:
                return v
        return -1
    def create_group(self, name):
        """create a group with the given name"""
        self.ns.createGroup(name)
    def get_remote_object_list(self, prefix = '', group = None):
        """return a list of the remote objects in the given group
        that start with the given prefix"""
        t = self.ns.list(group)
        u = [s for (s, n) in t if n == 1 and s.startswith(prefix)]
        return u
    def clear(self, prefix = '', group = None):
        """unregister all objects in the given group that start
        with the given prefix"""
        t = self.ns.list(group)
        print t
        for (s, n) in t:
            if not s.startswith(prefix): continue
            if n == 1:
                if group:
                    # qualify the bare name with its group before unregistering
                    s = '%s.%s' % (group, s)
                print s
                self.ns.unregister(s)
class RemoteObject(Pyro.core.ObjBase):
    """objects that want to be available remotely should inherit
    from this class, and either (1) don't override __init__ or
    (2) call RemoteObject.__init__ explicitly"""
    def __init__(self, name = None, ns = None):
        """Register this object with a (possibly freshly located) name server."""
        Pyro.core.ObjBase.__init__(self)
        if name == None:
            # fall back to a unique name derived from the object's id
            name = 'remote_object' + str(id(self))
        self.name = name
        if ns == None:
            ns = NameServer()
        self.connect(ns, name)
    def connect(self, ns, name):
        """connect to the given name server with the given name"""
        # create the daemon (the attribute is spelled "demon" to
        # avoid a name collision)
        addr = get_ip_addr()
        self.demon = Pyro.core.Daemon(host=addr)
        self.demon.useNameServer(ns.ns)
        # instantiate the object and advertise it
        try:
            print 'Connecting remote object', name
            self.uri = self.demon.connect(self, name)
        except Pyro.errors.NamingError:
            print 'Pyro NamingError: name already exists or is illegal'
            sys.exit(1)
        return self.name
    def requestLoop(self):
        """run the request loop until an exception occurs"""
        try:
            self.demon.requestLoop()
        except:
            self.cleanup()
            # re-raise anything that is not a Ctrl-C (Python 2 raise form)
            if sys.exc_type != KeyboardInterrupt:
                raise sys.exc_type, sys.exc_value
    def cleanup(self):
        """remove this object from the name server"""
        print 'Shutting down remote object', self.name
        try:
            self.demon.disconnect(self)
        except KeyError:
            print "tried to remove a name that wasn't on the name server"
        self.stopLoop()
        self.demon.shutdown()
    def threadLoop(self):
        """run the request loop in a separate thread"""
        self.thread = threading.Thread(target = self.stoppableLoop)
        self.thread.start()
    def stoppableLoop(self):
        """run handleRequests until another thread clears self.running"""
        self.running = 1
        try:
            while self.running:
                # poll with a 0.1s timeout so stopLoop() is noticed promptly
                self.demon.handleRequests(0.1)
        finally:
            self.cleanup()
    def stopLoop(self):
        """if threadLoop is running, stop it"""
        self.running = 0
    def join(self):
        """wait for the threadLoop to complete"""
        if hasattr(self, 'thread'):
            self.thread.join()
def main(script, name = 'remote_object', group = 'test', *args):
    """Demo driver: register a RemoteObject and exercise it from a thread.

    script is sys.argv[0]; name/group come from the command line.
    """
    # find the name server
    ns = NameServer()
    # if it doesn't have a group named test, make one
    if ns.query(group) == -1:
        print 'Making group %s...' % group
        ns.create_group(group)
    # create a remote object and connect it
    full_name = '%s.%s' % (group, name)
    server = RemoteObject(full_name, ns)
    # confirm that the group and object are on the name server
    print group, ns.query(group)
    print full_name, ns.query(name, group)
    print group, ns.get_remote_object_list(group=group)
    # create a Watcher and then run the server loop in a thread
    # (Watcher/MyThread are defined elsewhere in this module.)
    watcher = Watcher(server.cleanup)
    child = MyThread(client_code, full_name, server)
    server.stoppableLoop()
    print 'Server done.'
def client_code(full_name, server):
    """Runs in the child thread: call the remote object, then stop the server."""
    # get a proxy for this object
    # and invoke a method on it
    ns = NameServer()
    proxy = ns.get_proxy(full_name)
    print proxy.__hash__()
    # stop the server
    server.stopLoop()
    server.join()
    # child thread completes
    print 'Thread complete.'
if __name__ == '__main__':
    # argv[0] maps to `script`; optional argv[1]/argv[2] are name and group.
    main(*sys.argv)
|
UTF-8
|
Python
| false | false | 2,013 |
12,472,585,074,351 |
d418cedf3e5cbf363b62643b05bf0cadc0387bb2
|
0fcf6eafab50771bb0e6886fff64b6b09b0f443c
|
/mosaic.py
|
a1ba1cbb257651cae70577b133fea0d227d83f84
|
[] |
no_license
|
rhythm92/SomeImageFilterWithPython
|
https://github.com/rhythm92/SomeImageFilterWithPython
|
00907cb837a4848745e8aa1f103d85a011234391
|
9d6c5e75edba1809530f1b09abd278b62c0b106e
|
refs/heads/master
| 2020-06-23T20:24:09.296704 | 2012-09-17T03:24:20 | 2012-09-17T03:24:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#coding=utf-8
'''
Created on 2011-7-8
@author: Chine
'''
from PIL import Image
def mosaic(img, block_size):
    '''
    Mosaic (pixelate) effect.

    @param img: instance of Image
    @param block_size: edge length of each mosaic block, clamped to [1, 32]
    @return: new RGBA Image with each block filled by its average colour

    Fix: blocks clipped by the right/bottom edge now average only the
    pixels that actually exist.  The original divided by the full
    block_size ** 2 area, which darkened every border block.
    '''
    # Clamp block size to the supported range.
    block_size = min(max(block_size, 1), 32)

    if img.mode != "RGBA":
        img = img.convert("RGBA")

    width, height = img.size
    pix = img.load()

    dst_img = Image.new("RGBA", (width, height))
    dst_pix = dst_img.load()

    for w in xrange(0, width, block_size):
        for h in xrange(0, height, block_size):
            # Clip the block to the image bounds.
            w_end = min(w + block_size, width)
            h_end = min(h + block_size, height)
            r_sum, g_sum, b_sum = 0, 0, 0
            # Average over the pixels actually inside the (possibly
            # clipped) block, not the nominal block area.
            size = (w_end - w) * (h_end - h)
            for i in xrange(w, w_end):
                for j in xrange(h, h_end):
                    r_sum += pix[i, j][0]
                    g_sum += pix[i, j][1]
                    b_sum += pix[i, j][2]

            r_ave = int(r_sum / size)
            g_ave = int(g_sum / size)
            b_ave = int(b_sum / size)

            for i in xrange(w, w_end):
                for j in xrange(h, h_end):
                    # Alpha is copied from the block's top-left pixel.
                    dst_pix[i, j] = r_ave, g_ave, b_ave, pix[w, h][3]

    return dst_img
if __name__ == "__main__":
    import sys, os, time
    # Default input image lives next to this script in ./images/.
    path = os.path.dirname(__file__) + os.sep.join(['', 'images', 'lam.jpg'])
    block_size = 10
    # CLI: one arg is either a block size (int) or an image path;
    # two args are path then block size.
    if len(sys.argv) == 2:
        try:
            block_size = int(sys.argv[1])
        except ValueError:
            path = sys.argv[1]
    elif len(sys.argv) == 3:
        path = sys.argv[1]
        block_size = int(sys.argv[2])
    start = time.time()
    img = Image.open(path)
    img = mosaic(img, block_size)
    # Output is written beside the input as <name>.mosaic.png.
    img.save(os.path.splitext(path)[0]+'.mosaic.png', 'PNG')
    end = time.time()
    print 'It all spends %f seconds time' % (end-start)
|
UTF-8
|
Python
| false | false | 2,012 |
16,956,530,921,348 |
ae0bd1ed80afa05c0901622925a397027cd15f35
|
8f922e66276578e84ae04292b7c1e7835c111288
|
/app.py
|
2d5f8412789016f8d211a69c56ef1b61a5517fbd
|
[
"MIT"
] |
permissive
|
rcarmo/dash
|
https://github.com/rcarmo/dash
|
947a3205c4d14dcc899f4cfe639b0366bdd75c4a
|
2813c4176503f1f85c6afe6ee3531369451eb275
|
refs/heads/master
| 2021-01-20T21:29:56.739701 | 2014-04-26T18:08:36 | 2014-04-26T18:08:36 | 9,357,995 | 5 | 1 | null | true | 2014-04-26T18:08:36 | 2013-04-10T23:13:26 | 2014-04-26T18:08:36 | 2014-04-26T18:08:36 | 1,536 | 0 | 2 | 5 |
Python
| null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: et:ts=4:sw=4:
"""
Main application script
Created by: Rui Carmo
License: MIT (see LICENSE.md for details)
"""
import os, sys, json, logging
# Make sure our bundled libraries take precedence
sys.path.insert(0,os.path.join(os.path.dirname(os.path.abspath(__file__)),'lib'))
import utils, bottle
from config import settings
log = logging.getLogger()

# Run relative to the script's own directory so config/static paths resolve.
if os.path.dirname(__file__):
    os.chdir(os.path.dirname(__file__))

# Bottle's reloader forks a child process and marks it with BOTTLE_CHILD.
if settings.reloader:
    if "BOTTLE_CHILD" not in os.environ:
        log.debug("Using reloader, spawning first child.")
    else:
        log.debug("Child spawned.")

# Bind routes
# Only the worker (child) process, or a non-reloading run, imports the
# route definitions — the parent reloader process never serves requests.
if not settings.reloader or ("BOTTLE_CHILD" in os.environ):
    log.info("Setting up application.")
    import routes

if __name__ == "__main__":
    # Standalone: run bottle's built-in server.
    log.info("Serving requests.")
    bottle.run(
        port = settings.http.port,
        host = settings.http.bind_address,
        debug = settings.debug,
        reloader = settings.reloader
    )
else:
    # Imported by a WSGI container (e.g. uWSGI): expose the app object.
    log.info("Running under uWSGI")
    app = bottle.default_app()
|
UTF-8
|
Python
| false | false | 2,014 |
19,164,144,112,945 |
15ce46549228313f76fb98834eddfd775ce9e4d7
|
0ea92f8b3d2a4d0298fefaff9ca3b15fd3efdb9c
|
/king_snake/figures/knight.py
|
b6ea74cd7efee7298c5808095b122b3d333f4a8c
|
[
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
erget/KingSnake
|
https://github.com/erget/KingSnake
|
04e990e94576e77aa98bbaf39c3aacece27f1840
|
3992d493e8d74a277905c2aafc610e86dcbe060c
|
refs/heads/master
| 2021-01-15T10:50:57.693331 | 2013-06-16T16:43:36 | 2013-06-16T16:43:36 | 10,626,963 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""A knight chess piece."""
from .figure import Figure
class Knight(Figure):
    """A knight chess piece."""

    start_position = {"white": ("B1", "G1"),
                      "black": ("B8", "G8")}

    def __str__(self):
        # White knights render hollow, black knights solid.
        return "♘" if self.color == "white" else "♞"

    @property
    def legal_moves(self):
        """Return legal moves from current position."""
        # An L-shaped move: one cardinal step followed by a diagonal
        # step that continues in the same general direction.
        step_pairs = {"above": ("above_left", "above_right"),
                      "below": ("below_left", "below_right"),
                      "to_left": ("above_left", "below_left"),
                      "to_right": ("above_right", "below_right")}
        moves = []
        for first, diagonals in step_pairs.items():
            for second in diagonals:
                midpoint = getattr(self.position, first)()
                if midpoint:
                    destination = getattr(midpoint, second)()
                    if destination:
                        moves.append(destination)
        return moves
|
UTF-8
|
Python
| false | false | 2,013 |
1,108,101,593,828 |
9f3b8e96c62d0e04d99f725094c50ea981684bcc
|
9737be7001ab6151815262124e3dcb4f24447ed3
|
/imoveis/admin.py
|
0217ff99bb56494117f098a22d6b34325430d0a1
|
[] |
no_license
|
hlrossato/webimob
|
https://github.com/hlrossato/webimob
|
2712d5d1fdad9faf1f5fa55b52db067b7af30c03
|
5cfb3892f86024a1c08491c5b706ba1d94736a6d
|
refs/heads/master
| 2020-04-11T16:55:28.002839 | 2014-12-07T13:52:08 | 2014-12-07T13:52:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import CadastroImoveis, FotosImoveis
class FotosImoveisInline(admin.TabularInline):
    # Edit property photos inline on the property form; show 4 empty slots.
    model = FotosImoveis
    extra = 4
class CadastroImoveisAdmin(admin.ModelAdmin):
    # Change-list columns (imagemAdmin is presumably a thumbnail-rendering
    # callable on the model — confirm against the model definition).
    list_display = ('titulo', 'cidade', 'estado', 'imagemAdmin', 'ativo')
    search_fields = ('titulo', 'cidade',)
    list_filter = ['ativo', 'estado', 'destaque', 'possui_suite']
    save_on_top = True
    list_per_page = 20
    # URL slug is auto-filled from the title while typing.
    prepopulated_fields = {'slug': ("titulo",)}
    inlines = [FotosImoveisInline,]
admin.site.register(CadastroImoveis, CadastroImoveisAdmin)
|
UTF-8
|
Python
| false | false | 2,014 |
4,853,313,047,313 |
f96d3b98e86718b2bfe6e7c772a15f4bd345b11f
|
0d37d1fc520ce1c0ae85fd27ddfd3fb28590d1f7
|
/profile_gd_predict/cmp-profile.py
|
9dec4daf6461aa7f6635761191f5068ccc7b7947
|
[
"ISC"
] |
permissive
|
wac/meshop
|
https://github.com/wac/meshop
|
11ebd5e5a0e3d8e51f94a1500dd627c335cfbba0
|
ea5703147006e5e85617af897e1d1488e6f29f32
|
refs/heads/master
| 2016-09-10T12:27:54.915662 | 2012-09-30T09:24:37 | 2012-09-30T09:24:37 | 63,038 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import math
# Assume if x is too small that it is at least 1e-323
def safelog(x):
    """Natural log that never raises on zero/underflow-small input.

    Anything below 1e-323 (the smallest subnormal used here) is treated
    as exactly 1e-323, so log(0) yields a large negative constant
    instead of a math domain error.
    """
    if x >= 1e-323:
        return math.log(x)
    return -743.74692474082133  # == math.log(1e-323)
def usage():
    # Print CLI help: the expected input columns and the meaning of every
    # output column.  x/k/m/n follow hypergeometric-test notation.
    print sys.argv[0], " <disease-profiles> <gene-profiles>"
    print "Compare disease profiles to gene-profiles"
    print "Input format (field1|field2|coc|count1|count2|Total-count2|p|tfidf)"
    print "coc = x = white balls drawn from urn"
    print "count1 = k = number of balls drawn from the urn"
    print "count2 = m = number of white balls"
    print "Total-count2 = n = number of black balls"
    print "p = p-value"
    print "tfidf = term frequency-inverse document frequency"
    print ""
    print "Output format (disease|gene|D|G|I|U|L2_count|L2_count_Norm|L2_p|L2_logp|Intersect_L2_count_Norm|Intersect_L2_logp|sumdiff_logp|sum_logcombinedp|cosine_count_Norm|cosine_p|cosine_tfidf)"
    print "D : number of disease terms"
    print "G : number of gene terms"
    print "I : number of intersecting (common) terms"
    print "U : number of union (all) terms"
    print "L2_count: L2 Distance, raw term instances"
    print "L2_count_Norm: L2 Distance, term instances normalised by total term instances (term fractions)"
    print "L2_p : L2 Distance, hypergeometric p values"
    print "L2_logp : L2 Distance, log p values"
    print "Intersect_L2_count_Norm : Intersecting terms, L2 Distance, normalised counts"
    print "Intersect_L2_logp : Intersecting terms, L2 Distance, log p values"
    print "sumdiff_logp : Sum of differences, log p values"
    print "sum_logcombinedp : Sum, combined p value"
    print "cosine_count_Norm: Cosine Distance of normalised counts"
    print "cosine_p: Cosine Distance of p-values"
    print "cosine_tfidf: Cosine Distance of tf-idf values"
sep='|'
def main():
    """Stream the disease-profile file, grouping consecutive rows by the
    first column; each completed disease profile is compared against
    every gene profile by process_dterm().  Input must be sorted by the
    first column for the group-by to work.
    """
    if len(sys.argv) < 3:
        usage()
        sys.exit(-2)
    global sep
    # Currently compute L2 raw distance (probably useless), normalised (profile shape)
    # Open Disease File
    dprofile_raw = {}
    # NOTE(review): dprofile_norm is initialised here but never populated or
    # passed on — process_dterm builds its own normalised profile.
    dprofile_norm = {}
    dprofile_pval = {}
    dprofile_tfidf = {}
    currterm=''
    dtotal=0.0
    print "# disease|gene|D|G|I|U|L2_count|L2_count_Norm|L2_p|L2_logp|Intersect_L2_count_Norm|Intersect_L2_logp|sumdiff_logp|sum_logcombinedp|cosine_count_Norm|cosine_p|cosine_tfidf"
    disease_file=open(sys.argv[1], 'r')
    for line in disease_file:
        # Skip comment lines.
        if line[0] == '#':
            continue
        tuple=line.strip().split(sep)
        dterm=tuple[0]     # disease identifier (group key)
        dterm2=tuple[1]    # associated term
        dcount=int(tuple[2])
        dpval=float(tuple[6])
        dtfidf=float(tuple[7])
        if not(currterm):
            currterm = dterm
        # Group boundary: flush the accumulated profile, then reset.
        if not(currterm==dterm):
            process_dterm(currterm, dprofile_raw, dtotal, dprofile_pval, dprofile_tfidf)
            # Reset Disease profile
            dprofile_raw = {}
            dprofile_pval = {}
            dprofile_tfidf = {}
            dtotal=0.0
            currterm=dterm
        # Build profile
        dtotal=dtotal+dcount
        dprofile_raw[dterm2]=dcount
        dprofile_pval[dterm2]=dpval
        dprofile_tfidf[dterm2]=dtfidf
    # Process the last one
    process_dterm(currterm, dprofile_raw, dtotal, dprofile_pval, dprofile_tfidf)
def compare_gdterm(currgene, currterm, dprofile_raw, dtotal, dprofile_norm, dprofile_pval, dprofile_tfidf, cosine_norm_dmag, cosine_p_dmag, cosine_tfidf_dmag, gprofile_raw, gtotal, gprofile_norm, gprofile_pval, gprofile_tfidf):
    """Compare one disease profile against one gene profile and print a
    single pipe-separated result row.

    The d* arguments are the disease profile (raw counts, normalised
    fractions, p-values, tf-idf) plus precomputed vector magnitudes for
    the cosine denominators; the g* arguments are the gene profile.
    gprofile_norm is filled in here (mutated) as a side effect.
    Distances computed: L2 over counts / fractions / p-values / log-p,
    the same restricted to the term intersection, sum-of-|log-p|-diffs,
    a combined-p sum, and three cosine similarities.
    """
    cosine_norm_gmag=0.0
    cosine_p_gmag=0.0
    cosine_tfidf_gmag=0.0
    # Compute normalised
    for key in gprofile_raw:
        gprofile_norm[key]=float(gprofile_raw[key]) / gtotal
        cosine_norm_gmag=cosine_norm_gmag+(gprofile_norm[key]*gprofile_norm[key])
        cosine_p_gmag=cosine_p_gmag+(gprofile_pval[key]*gprofile_pval[key])
        cosine_tfidf_gmag=cosine_tfidf_gmag+(gprofile_tfidf[key]*gprofile_tfidf[key])
    cosine_norm_gmag=math.sqrt(cosine_norm_gmag)
    cosine_p_gmag=math.sqrt(cosine_p_gmag)
    cosine_tfidf_gmag=math.sqrt(cosine_tfidf_gmag)
    # Print Profiles
    pdist_raw=0
    pdist_norm=0.0
    pdist_pval=0.0
    pdist_logpval=0.0
    ipdist_norm=0.0
    ipdist_logpval=0.0
    sumdiff_logp=0.0
    sum_logcombinedp=0.0
    cosine_norm=0.0
    cosine_p=0.0
    cosine_tfidf=0.0
    # Work on copies so the caller's disease profile survives for the
    # next gene comparison.
    profile_raw=dprofile_raw.copy()
    profile_norm=dprofile_norm.copy()
    profile_pval=dprofile_pval.copy()
    profile_logpval=dprofile_pval.copy()
    iprofile_logpval = {}
    iprofile_norm = {}
    for key in profile_logpval:
        profile_logpval[key]=safelog(profile_logpval[key])
    # Turn the copies into difference profiles (disease minus gene);
    # terms only in the gene profile fall into the else branch.
    for key in gprofile_raw:
        if key in profile_raw:
            iprofile_norm[key] = profile_norm[key] - gprofile_norm[key]
            iprofile_logpval[key] = profile_logpval[key] - safelog(gprofile_pval[key])
            profile_raw[key] = profile_raw[key] - gprofile_raw[key]
            profile_norm[key] = profile_norm[key] - gprofile_norm[key]
            profile_pval[key] = profile_pval[key] - gprofile_pval[key]
            profile_logpval[key] = profile_logpval[key] - safelog(gprofile_pval[key])
            cosine_norm = cosine_norm+(dprofile_norm[key]*gprofile_norm[key])
            cosine_p = cosine_p+(dprofile_pval[key]*gprofile_pval[key])
            cosine_tfidf = cosine_tfidf+(dprofile_tfidf[key]*gprofile_tfidf[key])
        else:
            profile_raw[key] = gprofile_raw[key]
            profile_norm[key] = gprofile_norm[key]
            profile_pval[key]= 1.0 - gprofile_pval[key]
            profile_logpval[key]= safelog(gprofile_pval[key])
            cosine_p = cosine_p+gprofile_pval[key]
    # Terms only in the disease profile.
    for key in dprofile_raw:
        if not key in gprofile_raw:
            profile_pval[key] = 1.0 - dprofile_pval[key]
            cosine_p = cosine_p + dprofile_pval[key]
    # Accumulate the union-wide L2 sums; ucount is the union size.
    ucount = 0
    for key in profile_raw:
        ucount = ucount + 1
        pdist_raw = pdist_raw + (profile_raw[key]**2)
        pdist_norm = pdist_norm + (profile_norm[key]**2)
        pdist_pval = pdist_pval + (profile_pval[key]**2)
        pdist_logpval = pdist_logpval + (profile_logpval[key]**2)
        sumdiff_logp = sumdiff_logp + abs(profile_logpval[key])
    # Intersection-only sums; icount is the intersection size.
    icount = 0
    for key in iprofile_logpval:
        icount = icount + 1
        ipdist_norm=ipdist_norm + (iprofile_norm[key]**2)
        ipdist_logpval=ipdist_logpval + (iprofile_logpval[key]**2)
        # log of P(A or B) = p_d + p_g - p_d*p_g for each shared term.
        sum_logcombinedp = sum_logcombinedp + safelog(dprofile_pval[key] + gprofile_pval[key] - (dprofile_pval[key] * gprofile_pval[key]))
    pdist_raw = pdist_raw ** (0.5)
    # Max dist is 2.0
    pdist_norm = (pdist_norm ** (0.5)) / 2.0
    pdist_pval = pdist_pval ** (0.5)
    pdist_logpval = pdist_logpval ** (0.5)
    cosine_norm = cosine_norm/( cosine_norm_gmag * cosine_norm_dmag )
    # A zero magnitude would divide by zero below; treat it as fatal.
    if ( cosine_p_gmag * cosine_p_dmag == 0):
        sys.stderr.write("Profile computation Error ("+currterm+sep+currgene+"): cosine_p_gmag ("+str(cosine_p_gmag)+") * cosine_p_dmag ("+str(cosine_p_dmag)+") == 0\n")
        sys.exit(1)
    cosine_p = cosine_p/( cosine_p_gmag * cosine_p_dmag )
    cosine_tfidf = cosine_tfidf/( cosine_tfidf_gmag * cosine_tfidf_dmag )
    print currterm+sep+currgene+sep+str(len(dprofile_raw))+sep+str(len(gprofile_raw))+sep+str(icount)+sep+str(ucount)+sep+str(pdist_raw)+sep+str(pdist_norm)+sep+str(pdist_pval)+sep+str(pdist_logpval)+sep+str(ipdist_norm)+sep+str(ipdist_logpval)+sep+str(sumdiff_logp)+sep+str(sum_logcombinedp)+sep+str(cosine_norm)+sep+str(cosine_p)+sep+str(cosine_tfidf)
def process_dterm(currterm, dprofile_raw, dtotal, dprofile_pval, dprofile_tfidf):
    """Compare one completed disease profile against every gene profile.

    Precomputes the disease-side normalised profile and cosine
    magnitudes once, then streams the gene-profile file (sys.argv[2]),
    grouping consecutive rows by gene — so the whole gene file is
    re-read once per disease term.
    """
    global sep
    cosine_norm_dmag=0.0
    cosine_p_dmag=0.0
    cosine_tfidf_dmag=0.0
    dprofile_norm = {}
    # Generate normalised profile
    for key in dprofile_raw:
        dprofile_norm[key] = float(dprofile_raw[key]) / dtotal
        cosine_norm_dmag=cosine_norm_dmag+(dprofile_norm[key]*dprofile_norm[key])
        cosine_p_dmag=cosine_p_dmag+(dprofile_pval[key]*dprofile_pval[key])
        cosine_tfidf_dmag=cosine_tfidf_dmag+(dprofile_tfidf[key]*dprofile_tfidf[key])
    cosine_norm_dmag=math.sqrt(cosine_norm_dmag)
    cosine_p_dmag=math.sqrt(cosine_p_dmag)
    cosine_tfidf_dmag=math.sqrt(cosine_tfidf_dmag)
    currgene=0
    gtotal=0.0
    gprofile_raw = {}
    gprofile_norm = {}
    gprofile_pval = {}
    gprofile_tfidf = {}
    # Do all the gene processing here
    gfile=open(sys.argv[2], 'r')
    for line in gfile:
        # Skip comment lines.
        if line[0] == '#':
            continue
        tuple=line.strip().split(sep)
        gene=tuple[0]     # gene identifier (group key)
        gterm=tuple[1]    # associated term
        gcount=int(tuple[2])
        gpval=float(tuple[6])
        gtfidf=float(tuple[7])
        if not(currgene):
            currgene=gene
        # Group boundary: compare the finished gene profile, then reset.
        if not(gene==currgene):
            compare_gdterm(currgene, currterm, dprofile_raw, dtotal, dprofile_norm, dprofile_pval, dprofile_tfidf, cosine_norm_dmag, cosine_p_dmag, cosine_tfidf_dmag, gprofile_raw, gtotal, gprofile_norm, gprofile_pval, gprofile_tfidf)
            # Reset gene profile
            currgene=gene
            gtotal=0.0
            gprofile_raw = {}
            gprofile_norm = {}
            gprofile_pval = {}
            gprofile_tfidf = {}
        gtotal = gtotal+gcount
        gprofile_raw[gterm]=gcount
        gprofile_pval[gterm]=gpval
        gprofile_tfidf[gterm]=gtfidf
    gfile.close()
    # Process the last gene
    compare_gdterm(currgene, currterm, dprofile_raw, dtotal, dprofile_norm, dprofile_pval, dprofile_tfidf, cosine_norm_dmag, cosine_p_dmag, cosine_tfidf_dmag, gprofile_raw, gtotal, gprofile_norm, gprofile_pval, gprofile_tfidf)
main()
|
UTF-8
|
Python
| false | false | 2,012 |
7,971,459,332,279 |
460583d0a774f06582ef4a649a3f884855b82837
|
13b728d66b4c237b577a27de80977c0b89bf48c1
|
/PreFEst.py
|
c2ce446101d5f7431fcb80c1ed9b47dd5cad2495
|
[] |
no_license
|
mtb-beta/PreFEst
|
https://github.com/mtb-beta/PreFEst
|
daf4d65fb643b9cd55d504f37e2e1daff77f54a5
|
7ed9083f77ca585a7808c1064a6c5c931e38d761
|
refs/heads/master
| 2016-08-07T19:13:05.135635 | 2012-11-13T05:15:22 | 2012-11-13T05:15:22 | 6,591,742 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import harmonic_analysis as ha
import sound_model_PreFEst as smp
import numpy
def E_step(t,harmonic,weight,sound_model):
    """E-step: accumulate the expected log-likelihood for frame *t*.

    harmonic[pich][t][n] is the observed power of harmonic n for pitch
    index pich at frame t; weight[pich] is the current mixing weight.
    sound_model[pich] is indexed by the harmonic's frequency in cents
    (cent * (n+1)) — NOTE(review): that index is a float; this relies on
    the container accepting float keys/indices — confirm.

    NOTE(review): both ranges use len(...)-1, skipping the last pitch
    and the last harmonic, unlike M_step's ranges — confirm intentional.
    """
    E = 0
    for pich in range(len(weight)-1):
        # Fundamental frequency for this pitch index (55 Hz base, 12-TET).
        cent = 55.0*(2.0**((11+pich)/12.0))
        #print "pich:"+str(pich)
        #print "cent:"+str(cent)
        for n in range(len(harmonic[pich][t])-1):
            #print "n:"+str(n)
            #print "harmonic:"+str(harmonic[pich][t][n])
            #print "weight:"+str(weight[pich])
            #print "sound_model:"+str(len(sound_model))
            #print "sound_model[pich]:"+str(len(sound_model[pich]))
            #print "cent*n:"+str(cent*n)
            #print "sound_model[pich][cent*n]:"+str(sound_model[pich][cent*n])
            E +=harmonic[pich][t][n]*numpy.log(weight[pich]*sound_model[pich][cent*(n+1)])
            #print "E:"+str(E)
    return E
def M_step(t,harmonic,weight,sound_model,E):
    """M-step: recompute the (unnormalised) pitch weights for frame *t*.

    Each new weight is the harmonic-power-weighted responsibility of its
    pitch, scaled by 1/E; the caller normalises the returned vector to
    sum to one (see em_algorithm).
    """
    new_weight = numpy.zeros(len(weight))
    for pich in range(len(new_weight)):
        # Fundamental frequency for this pitch index (55 Hz base, 12-TET).
        cent = 55.0*(2.0**((11+pich)/12.0))
        weight_tmp = 0
        for n in range(len(harmonic[pich][t])):
            #print "line"
            #print harmonic[pich][n]
            #print weight[pich]
            #print cent*n
            #print pich
            #print sound_model[pich][cent*n]
            weight_tmp += harmonic[pich][t][n]*(weight[pich]*sound_model[pich][cent*(n+1)])/E
        new_weight[pich]=weight_tmp
    return new_weight
"""
emアルゴリズムを行う。
"""
def em_algorithm(harmonic):
    """Run per-frame EM over the harmonic data; return the per-frame
    pitch-weight vectors as a list W.

    NOTE(review): only the first len/5 frames are processed, and the
    inner while runs at most one EM iteration (count < 1) — confirm
    both are intentional rather than debugging shortcuts.
    """
    count = 0
    max_cent =16000
    W=[]
    # Load the pre-computed tone models, one per pitch, stacked row-wise.
    sound_model =read_sound_model(0)
    for pich in range(1,39):
        sound_model=numpy.vstack([sound_model,read_sound_model(pich)])
    # Initialise the weights uniformly (they always sum to 1).
    weight = numpy.ones(len(harmonic))
    weight = weight/numpy.sum(weight)
    new_weight= weight
    count = 0
    print str(len(harmonic[0]))
    for t in range(len(harmonic[0])/5):
        print "Processing time"+str(t)+",counter is "+str(count)
        # Re-start each frame from uniform weights.
        new_weight = numpy.ones(len(harmonic))
        new_weight = new_weight/numpy.sum(new_weight)
        count =0
        while(count < 1):
            #print "started L49:"+str(count)
            old_weight = new_weight
            E = E_step(t,harmonic,new_weight,sound_model)
            new_weight = M_step(t,harmonic,new_weight,sound_model,E)
            new_weight = new_weight/numpy.sum(new_weight)
            # Converged when the weight-vector norm barely changes.
            if(abs(numpy.linalg.norm(old_weight)-numpy.linalg.norm(new_weight))<0.0001):
                break
            count +=1
        print new_weight
        weight = new_weight
        W.append(weight)
    return W
"""
事前に処理しておいた調波構造テキストファイル群を読み込む
"""
def read_harmonic_power(filename):
    """Load the pre-extracted harmonic-structure files for all 39 pitches.

    Reads ./data/<filename>_harmonic<i>.txt (comma-separated) for
    i in 0..38 and returns the loaded arrays as a list indexed by pitch.
    """
    harmonic = []
    for i in range(39):
        fname = "./data/" + filename + "_harmonic" + str(i) + ".txt"
        # numpy.loadtxt opens the path itself; the original additionally
        # opened a second handle it never used.
        harmonic.append(numpy.loadtxt(fname, delimiter=','))
    return harmonic
"""
事前に処理しておいた音モデルファイル群を読み込む
"""
def read_sound_model(n):
    """Load the pre-computed tone model for pitch index *n*.

    Reads ./data/sound_model_<n>.txt (comma-separated) and returns the
    parsed numpy array.
    """
    fname = "./data/sound_model_" + str(n) + ".txt"
    # numpy.loadtxt opens the path itself; the original also opened (and
    # immediately closed) a handle it never used.
    return numpy.loadtxt(fname, delimiter=',')
"""
実行結果を書き出す。
"""
def write_weight(filename,weight):
    """Write the EM result to <filename>_PreFEst_result.txt and move it
    into ./data.

    Output layout: one row per pitch index, comma-separated across
    frames (i.e. the transpose of W as built by em_algorithm).
    """
    filename2 = (str)(filename)+"_PreFEst_result.txt"
    f =open(filename2,'w')
    print "process result output file at ./data"
    for n in range(len(weight[0])):
        for t in range(len(weight)-1):
            f.write(str(weight[t][n])+',')
        # Last frame ends the row without a trailing comma.
        f.write(str(weight[len(weight)-1][n])+'\n')
    f.close()
    import os
    # NOTE(review): shells out to "mv" (POSIX-only); shutil.move would be
    # portable — confirm before changing, since the behaviour differs on
    # failure.
    os.system("mv "+filename2+" ./data")
if __name__=='__main__':
    # Default base name; immediately overwritten from argv[1] below.
    filename ="ES_GV_C_mix_all_mono"
    import make_Pow as mp
    from sys import argv
    from os.path import splitext
    # argv[1] is an audio/data file path; its stem selects the
    # pre-extracted harmonic files in ./data.
    filename,format = splitext(argv[1])
    harmonic = read_harmonic_power(filename)
    #print len(harmonic)#39:pich
    #print len(harmonic[0])#658:frame
    #print len(harmonic[0][0])# 16:n
    w=em_algorithm(harmonic)
    write_weight(filename,w)
|
UTF-8
|
Python
| false | false | 2,012 |
5,265,629,947,805 |
58b69ecb2d49e625813eba7eec1e1e28a14140de
|
39ea9ac594dce79a223e71a4680a21951a1571b7
|
/private/indexer.py
|
efc5bf941f85b5383f5b436246a162ba91abc7a7
|
[] |
no_license
|
pnpranavrao/github_instant
|
https://github.com/pnpranavrao/github_instant
|
8be80a7570b0c76f21770eea01bbbaefdc3d06ce
|
6a3755fd4c06bc84fdf50800b8bce36d0235622d
|
refs/heads/master
| 2021-03-12T23:32:19.858007 | 2014-10-05T11:40:49 | 2014-10-05T11:40:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from elasticsearch import Elasticsearch
import sys
import ipdb
import os
import shutil
from pprint import pprint
from subprocess import call
from git import Repo
import re
from pygments import highlight
from pygments.lexers import get_lexer_for_filename
from pygments.formatters import HtmlFormatter
# Positional CLI arguments: Elasticsearch index/type names (file docs and
# function docs), the ES endpoint, the GitHub "owner/repo" slug, and the
# working directory used for private clones.
INDEX_NAME=sys.argv[1]
TYPE_NAME=sys.argv[2]
TYPE_NAME_FUNC=sys.argv[3]
ELASTICSEARCH_URL=sys.argv[4]
REPO=sys.argv[5]
PRIVATE_PATH=sys.argv[6]

PREVIEW_SIZE = 50     # number of leading lines stored as the preview
FULL_SIZE = 20000     # cap passed to f.read() per file

es = Elasticsearch([ELASTICSEARCH_URL])
def pygmentize(filename, filestring):
    """Return *filestring* highlighted as HTML.

    The lexer is guessed from *filename*'s extension; when no lexer
    matches, pygments raises (callers catch broadly and skip the file).
    Each line is wrapped in a span with id "line-N" for deep linking.
    """
    lexer = get_lexer_for_filename(filename)
    formatter = HtmlFormatter(cssclass="source", linespans="line")
    # The original opened a stray "test.html" handle here — a debug
    # leftover that truncated the file and leaked the descriptor.
    return highlight(filestring, lexer, formatter)
def get_paths(path, repo_url):
    """Walk the cloned repo at *path*, highlight each file, and index it
    (plus one child document per discovered function) in Elasticsearch.
    """
    for root, dirs, files in os.walk(path, topdown=False):
        #implement ignoring many files here
        full_root = root
        # Keep the path relative to the repo root for indexing.
        root = root.split(path)[1]
        # Skip hidden directories.
        if root and (root[0] == "."):
            continue
        for name in files:
            # Skip hidden files and files without an extension.
            if name and (name[0] == '.' or '.' not in name):
                continue
            path_name = os.path.join(root, name)
            full_path_name = os.path.join(full_root, name)
            functions = []
            with open(full_path_name, 'r') as f:
                # NOTE(review): read_data is never used — FULL_SIZE does
                # not actually cap the per-line read below; confirm the
                # intended truncation behaviour.
                read_data = f.read(FULL_SIZE)
                preview = ""
                content = ""
                line_number = 0
                f.seek(0)
                for line in f:
                    # First PREVIEW_SIZE lines also go into the preview.
                    if line_number <= PREVIEW_SIZE:
                        preview += line
                    content += line
                    line_number += 1
                    function_name = find_function(line)
                    if function_name:
                        # Methods are indexed without the "self." prefix.
                        if function_name.startswith("self."):
                            function_name = function_name[5:]
                        functions.append({"function_name":function_name, "line_number":line_number})
            try:
                content = pygmentize(name, content)
                preview = pygmentize(name, preview)
            except Exception as e:
                # No lexer for this file type (or highlighting failed): skip.
                continue
            body = {"path":path_name, "name":name, "body":content, "body_preview": preview, "repo_url" : repo_url }
            ret = es.index(index=INDEX_NAME, doc_type=TYPE_NAME, body=body)
            file_id = ret[u'_id']
            # Function docs are children of the file doc via `parent`.
            for function in functions:
                body = {"function_name":function["function_name"], "line_number":function["line_number"], "path":path_name, "repo_url" : repo_url}
                es.index(index=INDEX_NAME, doc_type=TYPE_NAME_FUNC, body=body, parent=file_id)
# Compiled once at import time.  Matches "def " preceded by whitespace OR at
# the very start of the line — the original "\sdef\s" pattern silently
# missed top-level (column-0) definitions.
_FUNC_DEF_RE = re.compile(r"(?:^|\s)def\s(.+)\s")


def find_function(line):
    """Extract a function name from a source line, or return None.

    The text between "def " and the first "(" (or, failing that, the
    first space) is taken as the name; callers strip a leading "self."
    themselves.
    """
    matches = _FUNC_DEF_RE.findall(line)
    if not matches:
        return None
    candidate = matches[0]
    if '(' in candidate:
        return candidate.split('(')[0]
    return candidate.split(' ')[0]
def clone_repo(repo_dir):
    """Clone the configured GitHub repo into *repo_dir* and strip its
    .git directory so only the working tree is indexed."""
    repo_url = "https://github.com/" + REPO
    call(["git", "clone", repo_url, repo_dir])
    shutil.rmtree(repo_dir + "/.git")
def main():
    """Clone the repo into PRIVATE_PATH/<repo-name> and index it."""
    repo_dir = PRIVATE_PATH + "/" + REPO.split("/")[1]
    clone_repo(repo_dir)
    get_paths(repo_dir, REPO)

if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,014 |
10,711,648,473,464 |
e72c7fb2fd2b6179272f3b7dab9ea189da8aa8d9
|
a863187808f14aec84cc561bdf8db690f7c79c6a
|
/src/utils/__init__.py
|
b19329f80fe79fa958972051d7486cee0b5de9da
|
[
"Unlicense"
] |
permissive
|
Shu-Ji/multi-supervisord-web-admin
|
https://github.com/Shu-Ji/multi-supervisord-web-admin
|
c1794cbdce5633e20402640408dde9fa1ed02495
|
20d1932c0ddc9293564fb6d99b4a1f4a5c9307e6
|
refs/heads/master
| 2016-09-06T17:04:23.338781 | 2014-07-22T10:17:03 | 2014-07-22T10:17:03 | 22,051,673 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: u8
import datetime
from hashlib import md5 as m5
import traceback
# Datetime / hashing convenience helpers.  Same public names and behaviour
# as before, written as plain functions instead of lambda assignments.

now = datetime.datetime.now


def now_str():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return dt2str(now())


def yesterday():
    """Yesterday as a datetime.date."""
    return datetime.date.today() - datetime.timedelta(days=1)


def yesterday_str():
    """Yesterday formatted as 'YYYY-MM-DD'."""
    return yesterday().strftime('%Y-%m-%d')


def tomorrow():
    """Tomorrow as a datetime.date."""
    return datetime.date.today() + datetime.timedelta(days=1)


def tomorrow_str():
    """Tomorrow formatted as 'YYYY-MM-DD'."""
    return tomorrow().strftime('%Y-%m-%d')


def str2dt(s):
    """Parse 'YYYY-MM-DD HH:MM:SS' into a datetime."""
    return datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')


def dt2str(dt):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS'."""
    return dt.strftime('%Y-%m-%d %H:%M:%S')


def md5(s):
    """Hex MD5 digest of *s*."""
    return m5(s).hexdigest()
def async(max_workers=10, debug=False):
    """Decorator factory: run a Tornado handler method on a thread pool.

    The wrapped method executes in EXECUTOR; its return value (or '' if
    None) is written back on the IOLoop thread before the request is
    finished.  With debug=True, exceptions are rendered into the
    response as a <pre> traceback instead of being swallowed silently.

    NOTE(review): `async` became a reserved keyword in Python 3.7, so
    this module only imports on Python 2 / <=3.6 — renaming would break
    existing callers.
    """
    from concurrent.futures import ThreadPoolExecutor
    from functools import partial, wraps
    import tornado.ioloop
    import tornado.web
    # One pool shared by every handler wrapped by this decorator instance.
    EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
    def unblock(f):
        @tornado.web.asynchronous
        @wraps(f)
        def wrapper(*args, **kwargs):
            self = args[0]
            def callback(future):
                # Runs on the IOLoop thread once the worker finishes.
                try:
                    self.write(future.result() or '')
                except:
                    if debug:
                        try:
                            self.write('<pre>%s</pre>' % traceback.format_exc())
                        except:
                            pass
                finally:
                    # try_finish is presumably a guard around finish() on
                    # the handler class — confirm against its definition.
                    self.try_finish()
            # Submit the real handler to the pool; marshal the result
            # back onto the IOLoop before touching the response.
            EXECUTOR.submit(
                partial(f, *args, **kwargs)
            ).add_done_callback(
                lambda future: tornado.ioloop.IOLoop.instance().add_callback(
                    partial(callback, future)))
        return wrapper
    return unblock
|
UTF-8
|
Python
| false | false | 2,014 |
5,016,521,833,560 |
dd3cfbf6f1751c43c0d0deba58958296924dd019
|
fdc7d1ca6d83735f241dfa173b0012a3866ad39e
|
/Euler/euler12.2.py
|
b69f9de70b03d5a2686107935974fc3aafc014fc
|
[] |
no_license
|
Mohamedtareque/joac-python
|
https://github.com/Mohamedtareque/joac-python
|
33b61ff7c3183c2226ce3939b215d95903482fcc
|
cf0d2829f344bc5f1392d657b5e48924e015d715
|
refs/heads/master
| 2020-04-06T04:48:00.311967 | 2013-03-01T20:29:54 | 2013-03-01T20:29:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from math import *
def triangleGenerator():
    """Yield the triangle numbers 1, 3, 6, 10, 15, ...

    ``a`` counts 1, 2, 3, ... and ``b`` keeps their running sum — the
    triangle numbers.  The original yielded the counter ``a`` instead of
    ``b``, so ``b`` was a dead accumulator; nothing in this script ever
    consumes the generator, so fixing the yield is safe for callers.
    """
    a = 0
    b = 0
    while True:
        a += 1
        b += a
        yield b
def getdivisores(numero):
    """Return the number of divisors of *numero*.

    Divisors are counted in pairs (d, numero // d) for d up to
    sqrt(numero).  Fixes over the original: the square root of a perfect
    square is no longer counted twice (9 now reports 3 divisors, not 4),
    and 1 reports a single divisor instead of 2.  `range` is used
    instead of `xrange` so the function runs on both Python 2 and 3.
    """
    if numero == 1:
        return 1
    counter = 2  # 1 and numero itself
    for divisor in range(2, int(sqrt(numero)) + 1):
        if not numero % divisor:
            if divisor * divisor == numero:
                counter += 1  # the square root pairs with itself
            else:
                counter += 2
    return counter
divisores = 0
# NOTE(review): this generator is created but never consumed — the loop
# below recomputes the triangle numbers by hand (a is the running sum).
numero = triangleGenerator()
a = 0
b = 0
# Project Euler 12: find the first triangle number with > 500 divisors.
while divisores < 500:
    b += 1
    a += b
    divisores = getdivisores(a)
    #print a, divisores
    # Progress output every 1000 iterations.
    if not b%1000:
        print a, divisores

print "Numero: %d \t Iteraciones: %d \t Divisores: %d" % (a, b, divisores)
|
UTF-8
|
Python
| false | false | 2,013 |
1,941,325,228,602 |
4fe298cbd1e53a35f610b2d57bb26f84c8b6db68
|
333605109372a4c86168deeadffe3e7f2f669d28
|
/packet/packet.py
|
9cddba9e6639edf4d51c11bdcb6354fd22c947b1
|
[] |
no_license
|
bytearchive/packetpy
|
https://github.com/bytearchive/packetpy
|
6dd0f628f4a00ad571fff36a215cafd6a6c9337c
|
83ab645987ae8aa42ee92fb6a8d9f5f25602376a
|
refs/heads/master
| 2021-01-12T13:16:40.469725 | 2009-08-26T09:17:45 | 2009-08-26T09:17:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright (c) 2003-2008, Nullcube Pty Ltd
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
PacketPy is a pure-Python module for parsing, modifying and creating
network packets.
"""
# TODO:
# - Documentation
# - The Packet class should have a fairly complete explanation in
# the Countershape docs (has been roughed out now)
# - Descriptors, classes, RFC references
# - Expand in-code documentation for use with pydoc.
# - 100% unit test coverage.
# - Better support for IP and TCP options.
# - ICMPv6 types.
# - Expose bytefields for all descriptors.
# - Unit-test more error conditions, and improve error reporting.
# - Protocols:
# - PPP
# - PFSync
# - Carp
# - Routing protocols
# - Higher-level protocols
# - SLIP
# - BOOTP
# - More extensive testing.
# - Generalise all protocol unit-tests to take a packet argument to act on.
# - Use generalised tests to exhaustively test the functionality of user-created packets.
# - A warnings framework to warn users of possibly undesired
# situations, eg. the construction of impossible packets, etc.
# - A detailed "dump" function that outputs all information about a packet.
# - Packet Captures Required:
# ICMPv4
# - ICMPRouterAdvertisement
# - ICMPInformationRequest
# - ICMPInformationReply
# - ICMPRedirect
# ICMPv6
# - ICMP6DestinationUnreachable
# - ICMP6PacketTooBig
# - ICMP6TimeExceeded
import array, sys
import _sysvar
from _packetDescriptors import *
import utils
# Base exception for this package.
class PacketPyError(Exception): pass
# Presumably raised when a field access falls outside the packet's raw
# data — raised by code elsewhere in this module; confirm at call sites.
class DataBoundsError(Exception): pass
#
# An options object that maps names to protocol numbers from the most recent
# IANA "Assigned Numbers" RFC
#
# (These are the standard IP "protocol"/"next header" field values.)
ProtocolOptions = Options(
    ICMP = 1,
    IGMP = 2,
    IP_IN_IP = 4,
    TCP = 6,
    UDP = 17,
    IPv6 = 41,
    ROUTING_HEADER = 43,
    FRAGMENTATION_HEADER = 44,
    ESP = 50,
    AH = 51,
    ICMP6 = 58,
    NO_NEXT_HEADER = 59,
    DESTINATION_OPTIONS_HEADER = 60
)
def createPacket(*headerClasses):
    """Build a zero-filled Packet from an ordered series of protocol classes.

    The buffer is sized from each class's _SIZEHINT.  The protocol chain
    is wired up by hand (without parsing the zero buffer), then each
    protocol fills in sensible defaults via _selfConstruct().
    """
    size = 0
    for i in headerClasses:
        size += i._SIZEHINT
    # _initialise=0: skip parse-on-construct; the stack is built manually.
    p = Packet(headerClasses[0], "\0"*size, 0)
    p.protostack = headerClasses[0](p, 0, 0)
    currentProtocol = p.protostack
    for i in headerClasses[1:]:
        # Append each subsequent protocol at the previous one's size hint.
        currentProtocol._addProtocol(i, currentProtocol._SIZEHINT, 0)
        currentProtocol = currentProtocol._next
    p._selfConstruct()
    return p
class Packet(object):
    """
    A single parsed packet.

    Protocols in the stack can be looked up dict-style:

        p["icmp"]            first protocol of type "icmp"
        p["icmp"]["icmp"]    second "icmp" protocol
        p["ip"]["icmp"]      first ICMP after the first IP

    or retrieved all at once with getProtoList().
    """
    def __init__(self, klass, packet, _initialise = 1):
        """
        klass  : class of the first protocol in the protocol stack
        packet : the raw packet data as a string
        """
        self._data = array.array("c", packet)
        self._klass = klass
        self.protostack = None
        if _initialise:
            self.initialise()

    def __getitem__(self, proto):
        # Delegate name lookup to the protocol chain.
        return self.protostack[proto]

    def has_key(self, key):
        return self.protostack.has_key(key)

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        pieces = []
        for proto in self.getProtoList():
            text = repr(proto)
            if text:
                pieces.append("[" + text + "]")
        return " ".join(pieces)

    def initialise(self):
        # (Re)parse the raw data into a protocol stack.
        self.protostack = self._klass(self)

    def getProtoList(self):
        """
        Return a list of all protocol objects in the stack.
        """
        return self.protostack._getProtoList()

    def getRaw(self):
        """
        Return the raw packet data.
        """
        return self._data.tostring()

    def _fixChecksums(self):
        """
        Fix all protocol checksums for this packet.
        """
        # Walk the stack back-to-front, matching the original bottom-up pass.
        for proto in reversed(self.getProtoList()):
            proto._fixChecksums()

    def _fixDimensions(self):
        """
        Fix packet dimension fields.
        """
        for proto in self.getProtoList():
            proto._fixDimensions()

    def finalise(self):
        # Dimensions first: checksums are computed over the fixed sizes.
        self._fixDimensions()
        self._fixChecksums()

    def _selfConstruct(self):
        for proto in self.getProtoList():
            proto._selfConstruct()
class Protocol(object):
    """
    Base class for all protocol header implementations. Instances form a
    doubly-linked chain (_prev/_next) over a shared Packet's byte buffer.

    Derived classes must define the following:
        TYPE        The name of this protocol.
    """
    TYPE = "Protocol"
    def __init__(self, packet, offset=0, constructNext=1):
        """
        Packet initialisation.
            packet          - An instance of the Packet class
            offset          - The byte offset of the start of this protocol
            constructNext   - Create the next protocol in the chain?
        """
        self.packet = packet
        self.offset = offset
        self._next = None
        self._prev = None
        self._printList = []
        if constructNext:
            self._constructNext()
    def __repr__(self):
        return self.TYPE
    def __getitem__(self, proto):
        # First protocol of the requested type from here onwards.
        h = self._getProtoListInclusive()
        for i in h:
            if i.isType(proto):
                return i
        raise KeyError, "No such protocol: %s."%proto
    def has_key(self, key):
        h = self._getProtoListInclusive()
        for i in h:
            if i.isType(key):
                return 1
        return 0
    def __len__(self):
        # Number of protocols from this one to the end of the chain.
        i = 0
        current = self
        while 1:
            i += 1
            if current._next:
                current = current._next
            else:
                break
        return i
    def isType(self, name):
        """
        Is this protocol of type "name"? Used for dict-like acces from
        Packet.

        We inspect the inheritance tree of the class, and check the TYPE
        attribute of each one. This allows us to, for instance, refer to
        either "icmp" and "icmpechorequest".
        """
        for i in self.__class__.__mro__:
            if hasattr(i, "TYPE"):
                if i.TYPE.lower() == name.lower():
                    return 1
        return 0
    def _getByteField(self, frm, tlen):
        """
        Return a number of bytes relative to the start of the current
        protocol.
        """
        if (tlen == 1):
            try:
                return self.packet._data[self.offset+frm]
            except IndexError:
                # Out-of-bounds single-byte read yields an empty string.
                return ''
        else:
            return self.packet._data[self.offset+frm:self.offset+frm+tlen].tostring()
    def _setByteField(self, frm, tlen, val):
        """
        Set a number of bytes relative to the start of the current
        protocol header.
        """
        self.packet._data[self.offset+frm:self.offset+frm+tlen] = array.array("c", val)
    def _getIntField(self, frm, tlen):
        """
        Return an integer corresponding to a whole number of bytes,
        relative to the start of the current protocol header.
        """
        b = self._getByteField(frm, tlen)
        if b and tlen == 1:
            return ord(b)
        else:
            # Multi-byte (big-endian) conversion is delegated to utils.
            return utils.multiord(b)
    def _setIntField(self, frm, tlen, val):
        """
        Set a field of bits to an integer value. The bit field position is
        specified relative to the start of the current protocol header.
        """
        return self._setByteField(frm, tlen, utils.multichar(val, tlen))
    def _getBitField(self, frm, bitoffset, bitlen):
        """
        Retrieve the integer value of a set of bits.
            byteoffset  : offset in bytes of the first byte from the start of data.
            bitoffset   : offset in bits from byteoffset to the first bit.
            bitlen      : number of bits to be extracted.
        """
        # Number of whole bytes the bit span touches.
        byteLen, m = divmod(bitoffset + bitlen, 8)
        if m:
            byteLen = byteLen+1
        x = self._getIntField(frm, byteLen)
        # Clear the high bits:
        x = x&(pow(2, (byteLen*8 - bitoffset))-1)
        # Shift out the low bits:
        x = x>>(byteLen*8 - bitoffset - bitlen)
        return x
    def _setBitField(self, frm, bitoffset, bitlen, val):
        """
        Set the integer value of a set of bits.
            bitoffset   : offset it bits from the start of the data.
            bitlen      : length of the bitfield to be set.
            val         : value to be written into place as an integer.
        """
        if 0 > val or val > (pow(2, bitlen)-1):
            raise ValueError, "Bit field must be in range 0-%s"%(pow(2, bitlen)-1)
        byteLen, m = divmod(bitoffset + bitlen, 8)
        if m:
            byteLen = byteLen+1
        # Align the new value with its position within the covered bytes.
        val = val << (byteLen*8 - bitoffset - bitlen)
        old = self._getIntField(frm, byteLen)
        # Now we clear the corresponding bits in the old value
        mask = ~((pow(2, bitlen)-1) << (byteLen*8 - bitoffset - bitlen))
        old = old&mask
        self._setIntField(frm, byteLen, old|val)
    def _splice(self, frm, to, val):
        # Replace a byte range (possibly resizing the packet) and re-parse.
        self.packet._data[self.offset+frm:self.offset+to] = array.array("c", val)
        self.packet.initialise()
    def _addProtocol(self, protocol, nextoffset, *args):
        # Instantiate and link the next protocol at the given relative offset.
        p = protocol(self.packet, self.offset + nextoffset, *args)
        self._next = p
        p._prev = self
    def _getProtoList(self):
        # Walk back to the head of the chain, then list forward from there.
        x = self
        while x._prev:
            x = x._prev
        return x._getProtoListInclusive()
    def _getProtoListInclusive(self):
        current = self
        lst = []
        while 1:
            lst.append(current)
            if current._next:
                current = current._next
            else:
                break
        return lst
    def _getSizehint(self):
        """
        Retrieve the cumulative SIZEHINT for this protocol and all
        protocols it contains.
        """
        sizehint = 0
        for i in self._getProtoListInclusive():
            sizehint += i._SIZEHINT
        return sizehint
    def _fixDimensions(self):
        """
        The _fixDimensions method for each protocol adjusts offset and
        length fields to take accound of a possible change in payload
        length. This is done as follows:
            - Take the difference between protocol.offset and the total
              length of the packet.
            - Now subtract the expected length the protocol headers.
            - This gives you the payload length. Adjust dimension
              fields to suit.
        """
        pass
    def _fixChecksums(self):
        """
        Protocol checksum methods are called in reverse order. That is, if
        we have a packet [Ethernet][IP][TCP], the finalise method for TCP
        will be called first, then IP, then Ethernet.
        """
        return
    def _constructNext(self):
        """
        This method should be implemented for all protocols that can
        contain nested protocols. It should construct the next protocol in
        the chain, and will usually add it using the _addProtocol method.
        """
        pass
    def _selfConstruct(self):
        """
        This method performs the "self-construction" actions for a given
        protocol. It is called during the packet creation process, i.e.
        when packets are created from scratch. This method should:
            - Inspect the _next class, and set protocol identification
              information accordingly.
            - Set sensible defaults (e.g. ttls, IP header version fields, etc.)
        """
        pass
def ipOptionFactory(parent, ipproto, offset):
    """
    Construct the specific IP option object for the option starting at
    "offset" within the given IP header, or None for unknown option types.
    """
    dispatch = {
        _IPOption._TypeOptions["EndOfList"]:            IPOptionEndOfList,
        _IPOption._TypeOptions["NOP"]:                  IPOptionNOP,
        _IPOption._TypeOptions["Security"]:             IPOptionSecurity,
        _IPOption._TypeOptions["LooseSourceRouting"]:   IPOptionLooseSourceRouting,
        _IPOption._TypeOptions["StrictSourceRouting"]:  IPOptionStrictSourceRouting,
        _IPOption._TypeOptions["RecordRoute"]:          IPOptionRecordRoute,
        _IPOption._TypeOptions["StreamID"]:             IPOptionStreamID,
        _IPOption._TypeOptions["InternetTimestamp"]:    IPOptionInternetTimestamp,
    }
    # Parse a generic option first just to read the type byte, then build
    # the concrete class (if we recognise the type) over the same bytes.
    stub = _IPOption(parent, ipproto, offset)
    klass = dispatch.get(stub.optionType)
    if klass is None:
        return None
    return klass(parent, ipproto, offset)
class _IPOption(Protocol):
    """Base class for IPv4 header options; byte 0 is the option type."""
    # NOTE(review): several of these values (0x130, 0x131, ...) exceed 0xff
    # and cannot fit the one-byte type field read below — confirm intent.
    _TypeOptions = Options(
        EndOfList = 0x0,
        NOP = 0x1,
        Security = 0x130,
        LooseSourceRouting = 0x131,
        StrictSourceRouting = 0x137,
        RecordRoute = 0x7,
        StreamID = 0x136,
        InternetTimestamp = 0x138
    )
    optionType = IntField(0, 1, options=_TypeOptions)
    # Plain single-byte options have length 1; _IPOptionExtended overrides
    # this with a descriptor reading the option's length octet.
    length = 1
    def __init__(self, parent, ipproto, offset):
        Protocol.__init__(self, parent, offset)
        # The IP header object this option belongs to.
        self.ipproto = ipproto
        self._next = self._getNext()
    def _getNext(self):
        # Chain further options while still inside the IP header area.
        if not (self.offset > (self.ipproto.offset + (self.ipproto.headerLength * 4))):
            return ipOptionFactory(self.packet, self.ipproto, self.offset + self.length)
        else:
            return None
    def initialise(self):
        self.packet.initialise()
class IPOptionEndOfList(_IPOption):
    """Option 0: terminates the option list — nothing follows."""
    TYPE = "IPOptionEndOfList"
    def _getNext(self):
        return None
class IPOptionNOP(_IPOption):
    """Option 1: single-byte padding, no payload."""
    TYPE = "IPOptionNOP"
class _IPOptionExtended(_IPOption):
    """
    An IP Option that has an associated length.
    """
    # Byte 1 is the total option length in octets (overrides the class
    # attribute length = 1 on _IPOption).
    length = IntField(1, 1)
    def __init__(self, *args):
        _IPOption.__init__(self, *args)
class IPOptionSecurity(_IPOptionExtended):
    """RFC 791 Security option."""
    TYPE = "IPOptionSecurity"
    payload = Payload()
    def _getPayloadOffsets(self):
        # NOTE(review): option length includes the 2-byte type/length header,
        # so dataLength here may be 2 bytes too long — confirm.
        offset = 2
        dataLength = self.length
        return offset, dataLength
class _IPOptionRouting(_IPOptionExtended):
    """Shared layout for the routing options: pointer + IPv4 address list."""
    pointer = IntField(2, 1)
    addrlist = DescriptorProxy("_addrlist")
    def __init__(self, *args):
        _IPOptionExtended.__init__(self, *args)
        # Address list length depends on this instance's option length,
        # hence the per-instance descriptor behind the proxy.
        self._addrlist = IPAddressList(3, self.length-3)
class IPOptionLooseSourceRouting(_IPOptionRouting):
    """Loose source routing: route the datagram may deviate from."""
    TYPE = "IPOptionLooseSourceRouting"
class IPOptionStrictSourceRouting(_IPOptionRouting):
    """Strict source routing: route that must be followed exactly."""
    TYPE = "IPOptionStrictSourceRouting"
class IPOptionRecordRoute(_IPOptionRouting):
    """Record route: each hop appends its address to the list."""
    TYPE = "IPOptionRecordRoute"
class IPOptionStreamID(_IPOptionExtended):
    """Stream identifier option."""
    TYPE = "IPOptionStreamID"
    payload = Payload()
    def _getPayloadOffsets(self):
        # NOTE(review): as with IPOptionSecurity, dataLength = self.length
        # may over-count by the 2 header octets — confirm.
        offset = 2
        dataLength = self.length
        return offset, dataLength
class IPOptionInternetTimestamp(_IPOptionExtended):
    """Internet timestamp option."""
    TYPE = "IPOptionInternetTimestamp"
    _FlagsOptions = Options(
        TIMESTAMP_ONLY = 0,
        IP_PRECEDES = 1,
        IP_PRESPECIFIED = 3
    )
    # NOTE(review): this reads the length from byte 2, whereas
    # _IPOptionExtended reads it from byte 1 — confirm which is intended.
    length = IntField(2, 1)
    overflow = BitField(3, 0, 4)
    flag = FlagsField(3, 4, 4, options=_FlagsOptions)
    payload = Payload()
    def _getPayloadOffsets(self):
        # 4 fixed octets (type, length, pointer, overflow/flag) precede data.
        dataLength = self.length - 4
        return 4, dataLength
class IP(Protocol):
    """IPv4 header (RFC 791)."""
    TYPE = "IP"
    _SIZEHINT = 20
    _FlagsOptions = Options(
        MF = 1,
        DF = 2,
        RES = 4 # Reserved bit
    )
    # Fields
    version = BitField(0, 0, 4, "IP Protocol Version")
    headerLength = BitField(0, 4, 4, "Length of IP Header in 32-bit words")
    tos = IntField(1, 1, "Type of Service")
    length = IntField(2, 2, "Length of packet in bytes, including payload")
    ident = IntField(4, 2, "IP identification number")
    flags = FlagsField(6, 0, 3, "IP Flags", options=_FlagsOptions)
    fragmentOffset = BitField(6, 3, 13, "Fragment Offset")
    ttl = IntField(8, 1, "Time to Live")
    protocol = IntField(9, 1, "Contained Protocol", options=ProtocolOptions)
    checksum = IntField(10, 2, "32-bit CRC")
    src = IPAddress(12, "Source IP Address")
    dst = IPAddress(16, "Destination IP Address")
    optionsField = DescriptorProxy("_optionsField")
    # FIXME: header padding
    payload = Payload()
    def __init__(self, *args):
        Protocol.__init__(self, *args)
        # Options occupy whatever of the header follows the 20 fixed bytes.
        self._optionsField = ByteField(20, (self.headerLength*4) - 20)
        if self.optionsField:
            self.options = ipOptionFactory(self.packet, self, self.offset+20)
    def __repr__(self):
        return "IP: %s->%s"%(self.src, self.dst)
    def _getPayloadOffsets(self):
        offset = self.headerLength*4
        dataLength = self.length - offset
        return offset, dataLength
    def _selfConstruct(self):
        # Set defaults and identify the contained protocol when building
        # a packet from scratch.
        if self._next:
            # We make a horrid exception for ICMP - ICMP can be any of a number of classes...
            if isinstance(self._next, ICMPBase):
                self.protocol = "ICMP"
            else:
                self.protocol = IP4_PROTO_JUMPER.get(self._next.__class__, 0)
        self.headerLength = self._SIZEHINT/4
        self.length = self._getSizehint()
        self.version = 4
        self.ttl = 255
    def _constructNext(self):
        try:
            if IP4_PROTO_JUMPER.has_key(self.protocol):
                self._addProtocol(IP4_PROTO_JUMPER[self.protocol], self.headerLength*4)
        except DataBoundsError:
            # If our data length is too short, we simply don't consruct the
            # next proto...
            pass
    def _fixChecksums(self):
        # Checksum must be zero while the header sum is computed.
        self.checksum = 0
        self.checksum = utils.cksum16(self.packet._data[self.offset:self.offset + (self.headerLength * 4)])
    def _fixDimensions(self):
        self.length = (len(self.packet) - self.offset)
class ICMPBase(Protocol):
    """Common layout for all ICMP messages (RFC 792): type, code, checksum."""
    TYPE = "ICMP"
    _SIZEHINT = 4
    _TypeOptions = Options(
        ICMPEchoReply = 0,
        ICMPDestinationUnreachable = 3,
        ICMPSourceQuench = 4,
        ICMPRedirect = 5,
        ICMPEchoRequest = 8,
        ICMPRouterAdvertisement = 9,
        ICMPRouterSolicitation = 10,
        ICMPTimeExceeded = 11,
        ICMPParameterProblem = 12,
        ICMPTimestampRequest = 13,
        ICMPTimestampReply = 14,
        ICMPInformationRequest = 15,
        ICMPInformationReply = 16,
        ICMPAddressMaskRequest = 17,
        ICMPAddressMaskReply = 18
    )
    itype = IntField(0, 1, "ICMP Type", _TypeOptions)
    code = IntField(1, 1, "ICMP Code (See CODE_* attributes)")
    checksum = IntField(2, 2, "CRC16 Checksum")
    payload = Payload()
    def _fixChecksums(self):
        # ICMP checksums the whole ICMP message, i.e. the enclosing IP
        # header's payload; the field must be zeroed first.
        self.checksum = 0
        self.checksum = utils.cksum16(self._prev.payload)
    def _selfConstruct(self):
        self.itype = self.TYPE
    def _getPayloadOffsets(self):
        # Assumes an 8-byte ICMP header; payload runs to the end of the
        # enclosing IP datagram.
        offset = 8
        dataLength = self._prev.length - (self._prev.headerLength*4 + offset)
        return offset, dataLength
class _ICMPIDSeqBase(ICMPBase):
    """
    Base for ICMP packets that also have an ID and a sequence
    number.
    """
    _SIZEHINT = 8
    identifier = IntField(4, 2)
    seq_num = IntField(6, 2)
class _ICMPWithIPHdr(ICMPBase):
    """
    Base for ICMP packets that also have an appended IP header
    (error messages quote the offending IP header + 64 bits of data).
    """
    _SIZEHINT = 8 + IP._SIZEHINT + 64
    def __init__(self, packet, offset=None, *args):
        ICMPBase.__init__(self, packet, offset, *args)
        # Parse the quoted IP header that follows the 8-byte ICMP header.
        self.iphdr = IP(packet, offset+8)
class ICMPDestinationUnreachable(_ICMPWithIPHdr):
    """
    ICMP Destination Unreachable

    See RFC 792
    """
    TYPE = "ICMPDestinationUnreachable"
    _CodeOptions = Options(
        NET_UNREACHABLE = 0,
        HOST_UNREACHABLE = 1,
        PROTOCOL_UNREACABLE = 2,
        PORT_UNREACABLE = 3,
        FRAG_NEEDED_DF_SET = 4,
        SOURCE_ROUTE_FAILED = 5
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Destination Unreachable"
class ICMPSourceQuench(_ICMPWithIPHdr):
    """
    ICMP Source Quench (deprecated congestion signal).

    See RFC 792
    """
    TYPE = "ICMPSourceQuench"
    _CodeOptions = Options(
        SOURCE_QUENCH = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Source Quench"
class ICMPRedirect(_ICMPWithIPHdr):
    """
    ICMP Redirect
        gateway_addr    : Gateway Address
    """
    TYPE = "ICMPRedirect"
    _SIZEHINT = 8
    _CodeOptions = Options(
        NETWORK_REDIRECT = 0,
        HOST_REDIRECT = 1,
        TOS_NETWORK_REDIRECT = 2,
        TOSHOST_REDIRECT = 3
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    gateway_addr = IPAddress(4)
    def __repr__(self):
        return "ICMP Redirect"
class _ICMPEcho(_ICMPIDSeqBase):
    # Shared TYPE so p["icmpecho"] matches both request and reply.
    TYPE = "ICMPEcho"
class ICMPEchoRequest(_ICMPEcho):
    """
    ICMP Echo Request ("ping").
        identifier  : Identifier
        seq_num     : Sequence Number
    """
    TYPE = "ICMPEchoRequest"
    _CodeOptions = Options(
        ECHO_REQUEST = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Echo Request"
class ICMPEchoReply(_ICMPEcho):
    """
    ICMP Echo Reply ("pong").
        identifier  : Identifier
        seq_num     : Sequence Number
    """
    TYPE = "ICMPEchoReply"
    _CodeOptions = Options(
        ECHO_REPLY = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Echo Reply"
class ICMPRouterAdvertisement(ICMPBase):
    """
    ICMP Router Advertisement
        itype       : Type of ICMP packet
        code        : Code of more specific packet purpose
        checksum    : Checksum
    """
    # NOTE(review): fields occupy bytes 4-5 but this class inherits
    # _SIZEHINT = 4 from ICMPBase (ICMPRouterSolicitation sets 8) — confirm.
    TYPE = "ICMPRouterAdvertisement"
    _CodeOptions = Options(
        ROUTER_ADVERTISEMENT = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    num_addresses = IntField(4, 1)
    address_size = IntField(5, 1)
    def __repr__(self):
        return "ICMP Router Advertisement"
class ICMPRouterSolicitation(ICMPBase):
    """
    ICMP Router Solicitation
        itype       : Type of ICMP packet
        code        : Code of more specific packet purpose
        checksum    : Checksum
    """
    TYPE = "ICMPRouterSolicitation"
    _SIZEHINT = 8
    _CodeOptions = Options(
        ROUTER_SOLICITATION = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    num_addresses = IntField(4, 1)
    address_size = IntField(5, 1)
    lifetime = IntField(6, 2)
    # FIXME The contained router addresses need to be
    # collected and mapped to an array for the user
    def __repr__(self):
        return "ICMP Router Solicitation"
class ICMPTimeExceeded(_ICMPWithIPHdr):
    """
    ICMP Time Exceeded (TTL expired in transit, or reassembly timeout).
    """
    TYPE = "ICMPTimeExceeded"
    _CodeOptions = Options(
        TRANSIT = 0,
        REASSEMBLY = 1
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Time Exceeded"
class ICMPParameterProblem(_ICMPWithIPHdr):
    """
    ICMP Parameter Problem
        pointer : Byte offset of the problem within the quoted header
    """
    TYPE = "ICMPParameterProblem"
    _CodeOptions = Options(
        HEADER_BAD = 0,
        OPTION_MISSING = 1
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    pointer = IntField(4, 1)
    def __repr__(self):
        return "ICMP Parameter Problem"
class _ICMPTimestampBase(_ICMPIDSeqBase):
    """
    ICMP Timestamp Base: three 32-bit timestamps after the id/seq pair.
        identifier  : Identifier
        seq_num     : Sequence Number
    """
    _SIZEHINT = 20
    origin_ts = IntField(8, 4)
    receive_ts = IntField(12, 4)
    transmit_ts = IntField(16, 4)
class ICMPTimestampRequest(_ICMPTimestampBase):
    """
    ICMP Timestamp Request
        identifier  : Identifier
        seq_num     : Sequence Number
        origin_ts   : Origin Timestamp
        receive_ts  : Receiver Timestamp
        transmit_ts : Transmitting Timestamp
    """
    TYPE = "ICMPTimestampRequest"
    _CodeOptions = Options(
        TIMESTAMP_REQUEST = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Timestamp Request"
class ICMPTimestampReply(_ICMPTimestampBase):
    """
    ICMP Timestamp Reply
        identifier  : Identifier
        seq_num     : Sequence Number
        origin_ts   : Origin Timestamp
        receive_ts  : Receiver Timestamp
        transmit_ts : Transmitting Timestamp
    """
    TYPE = "ICMPTimestampReply"
    _CodeOptions = Options(
        TIMESTAMP_REPLY = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Timestamp Reply"
class ICMPInformationRequest(_ICMPIDSeqBase):
    """
    ICMP Information Request
        identifier  : Identifier
        seq_num     : Sequence Number
    """
    TYPE = "ICMPInformationRequest"
    _CodeOptions = Options(
        INFORMATION_REQUEST = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Information Request"
class ICMPInformationReply(_ICMPIDSeqBase):
    """
    ICMP Information Reply
        identifier  : Identifier
        seq_num     : Sequence Number
    """
    TYPE = "ICMPInformationReply"
    _CodeOptions = Options(
        INFORMATION_REPLY = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Information Reply"
class ICMPAddressMaskRequest(_ICMPIDSeqBase):
    """
    ICMP Address Mask Request
        identifier  : Identifier
        seq_num     : Sequence Number
    """
    # BUG FIX: TYPE was misspelled "ICMPAddressMarkRequest", which broke
    # dict-style lookup (p["icmpaddressmaskrequest"]) via Protocol.isType
    # and was inconsistent with ICMPAddressMaskReply / the class name.
    TYPE = "ICMPAddressMaskRequest"
    _CodeOptions = Options(
        ADDRESSMASK_REQUEST = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    def __repr__(self):
        return "ICMP Address Mask Request"
class ICMPAddressMaskReply(_ICMPIDSeqBase):
    """
    ICMP Address Mask Reply
        identifier  : Identifier
        seq_num     : Sequence Number
        subnet_mask : The advertised subnet mask
    """
    _SIZEHINT = 12
    TYPE = "ICMPAddressMaskReply"
    _CodeOptions = Options(
        ADDRESSMASK_REPLY = 0
    )
    code = IntField(1, 1, "ICMP Code", options=_CodeOptions)
    subnet_mask = IPAddress(8)
    def __repr__(self):
        return "ICMP Address Mask Reply"
# Maps ICMP type numbers to their concrete message classes. The duplicate
# ICMPECHOREQUEST entry present in the original has been removed (it mapped
# the same key to the same class, so behaviour is unchanged).
_ICMPJumptable = {
    ICMPBase._TypeOptions["ICMPECHOREPLY"] : ICMPEchoReply,
    ICMPBase._TypeOptions["ICMPECHOREQUEST"] : ICMPEchoRequest,
    ICMPBase._TypeOptions["ICMPDESTINATIONUNREACHABLE"] : ICMPDestinationUnreachable,
    ICMPBase._TypeOptions["ICMPSOURCEQUENCH"] : ICMPSourceQuench,
    ICMPBase._TypeOptions["ICMPREDIRECT"] : ICMPRedirect,
    ICMPBase._TypeOptions["ICMPROUTERADVERTISEMENT"] : ICMPRouterAdvertisement,
    ICMPBase._TypeOptions["ICMPROUTERSOLICITATION"] : ICMPRouterSolicitation,
    ICMPBase._TypeOptions["ICMPTIMEEXCEEDED"] : ICMPTimeExceeded,
    ICMPBase._TypeOptions["ICMPPARAMETERPROBLEM"] : ICMPParameterProblem,
    ICMPBase._TypeOptions["ICMPTIMESTAMPREQUEST"] : ICMPTimestampRequest,
    ICMPBase._TypeOptions["ICMPTIMESTAMPREPLY"] : ICMPTimestampReply,
    ICMPBase._TypeOptions["ICMPINFORMATIONREQUEST"] : ICMPInformationRequest,
    ICMPBase._TypeOptions["ICMPINFORMATIONREPLY"] : ICMPInformationReply,
    ICMPBase._TypeOptions["ICMPADDRESSMASKREQUEST"] : ICMPAddressMaskRequest,
    ICMPBase._TypeOptions["ICMPADDRESSMASKREPLY"] : ICMPAddressMaskReply
}
def ICMP(*args):
    """
    ICMP factory: parse the common 4-byte header, then re-parse the same
    bytes as the specific message class for the type field (or return the
    generic stub for unknown types).
    """
    generic = ICMPBase(*args)
    specific = _ICMPJumptable.get(generic.itype)
    if specific is None:
        return generic
    return specific(*args)
class TCP(Protocol):
    """
    TCP (RFC 793).

    Fields :
        srcPort     : Source Port
        dstPort     : Destination Port
        seq_num     : Sequence Number
        ack_num     : Ack Number
        dataOffset  : Data Offset (header length in 32-bit words)
        reserved    : Reserved
        flags       : Flags
        window      : Window
        checksum    : Checksum
        urgent      : Urgent pointer
    """
    TYPE = "TCP"
    _SIZEHINT = 20
    # Flags
    _FlagsOptions = Options(
        URG = 32,
        ACK = 16,
        PSH = 8,
        RST = 4,
        SYN = 2,
        FIN = 1
    )
    # Fields
    srcPort = IntField(0, 2)
    dstPort = IntField(2, 2)
    seq_num = IntField(4, 4)
    ack_num = IntField(8, 4)
    dataOffset = BitField(12, 0, 4)
    reserved = BitField(12, 4, 6)
    flags = FlagsField(13, 2, 6, options=_FlagsOptions)
    window = IntField(14, 2)
    checksum = IntField(16, 2)
    # FIXME: TCP Options
    urgent = IntField(18, 2)
    payload = Payload()
    def _getPayloadOffsets(self):
        offset = self.dataOffset*4
        dataLength = self._prev.length - (self._prev.headerLength*4 + self.dataOffset*4)
        return offset, dataLength
    def _getPseudoHeader(self):
        # Pseudo-header for checksumming: src/dst IPs, zero byte, protocol
        # number, and the TCP segment length (assumes IPv4 parent).
        ip = self._prev
        tcplen = self._getPayloadOffsets()
        tcplen = tcplen[0] + tcplen[1]
        phdr = [
            ip._getByteField(12, 4),
            ip._getByteField(16, 4),
            "\0",
            ip._getByteField(9, 1),
            utils.multichar(tcplen, 2)
        ]
        return array.array("c", "".join(phdr))
    def _fixChecksums(self):
        tcplen = self._getPayloadOffsets()
        tcplen = tcplen[0] + tcplen[1]
        # Checksum field must be zero while the sum is computed.
        self.checksum = 0
        self.checksum = utils.cksum16(self._getPseudoHeader() + self.packet._data[self.offset:self.offset+tcplen])
    def _selfConstruct(self):
        # Default header length: 5 words (no options).
        self.dataOffset = 5
    def __repr__(self):
        return "TCP: %s->%s"%(self.srcPort, self.dstPort)
class UDP(Protocol):
    """
    UDP (RFC 768).
        srcPort     : Source Port
        dstPort     : Destination Port
        length      : Length (header + data)
        checksum    : Checksum
    """
    TYPE = "UDP"
    _SIZEHINT = 8
    srcPort = IntField(0, 2)
    dstPort = IntField(2, 2)
    length = IntField(4, 2)
    checksum = IntField(6, 2)
    payload = Payload()
    def _getPayloadOffsets(self):
        offset = 8
        dataLength = self._prev.length - (self._prev.headerLength*4 + offset)
        return offset, dataLength
    def _getPseudoHeader(self):
        # IPv4 pseudo-header: src/dst IPs, zero byte, protocol, UDP length.
        ip = self._prev
        phdr = [
            ip._getByteField(12, 4),
            ip._getByteField(16, 4),
            "\0",
            ip._getByteField(9, 1),
            self._getByteField(4, 2)
        ]
        return array.array("c", "".join(phdr))
    def _constructNext(self):
        # BOOTP/DHCP is recognised by the well-known ports 67/68.
        if self.srcPort in (68, 67) or self.dstPort in (68, 67):
            self._addProtocol(DHCP, 8)
    def _fixChecksums(self):
        self.checksum = 0
        self.checksum = utils.cksum16(self._getPseudoHeader() + \
                        self.packet._data[self.offset:self.offset+self.length])
    def __repr__(self):
        return "UDP: %s->%s"%(self.srcPort, self.dstPort)
class DHCP(Protocol):
    """
    DHCP / BOOTP (RFC 2131).
        op      : Message Type
        htype   : Hardware Type
        hlen    : Hardware Address Length
        hops    : Hops
        xid     : Transaction ID
        secs    : Seconds Elapsed
        flags   : Bootp flags
        ciaddr  : Client IP Address
        yiaddr  : Your (client) IP Address
        siaddr  : Next Server IP Address
        giaddr  : Relay Agent IP Address
        chaddr  : Client MAC Address
        sname   : Server host name
        file    : Boot file name

    Note that this class assumes that the hardware layer is Ethernet.
    """
    TYPE = "DHCP"
    # 0x63825363 — the RFC 2131 magic cookie read as one 4-byte integer.
    _COOKIEOK = 1669485411 # The 4 integers decode to to this as single field
    _FlagsOptions = Options(
        UNICAST = 0,
        BROADCAST = 32768,
    )
    op = IntField(0, 1)
    htype = IntField(1, 1)
    hlen = IntField(2, 1)
    hops = IntField(3, 1)
    xid = IntField(4, 4)
    secs = IntField(8, 2)
    flags = FlagsField(10, 0, 16, options=_FlagsOptions)
    ciaddr = IPAddress(12, "Client IP Address")
    yiaddr = IPAddress(16, "Client IP Address")
    siaddr = IPAddress(20, "Next Server IP Address")
    giaddr = IPAddress(24, "Relay IP Address")
    chaddr = EthernetAddress(28)
    sname = PaddedString(44, 64)
    filename = PaddedString(108, 128)
    cookie = IntField(236, 4)
    payload = Payload()
    def __repr__(self):
        return """DHCP:
        Message Type: %s
        Hardware Type: %s
        Hardware Address Length: %s
        Hops: %s
        Transaction ID: %s
        Seconds Elapsed: %s
        Bootp flags: %s
        Client IP Address: %s
        Your (client) IP Address: %s
        Next Server IP Address: %s
        Relay Agent IP Address: %s
        Client MAC Address: %s
        Server host name: %s
        Boot file name: %s
        Cookie: %s"""%(
            self.op, self.htype, self.hlen, self.hops, self.xid,
            self.secs, utils.i2b(self.flags), self.ciaddr, self.yiaddr,
            self.siaddr, self.giaddr, self.chaddr, self.sname, self.filename,
            ("OK" if self.cookie == self._COOKIEOK else "BAD")
        )
class IGMP(Protocol):
    # Placeholder: IGMP parsing is not implemented yet.
    pass
class ARP(Protocol):
    """
    ARP (RFC 826).

    Fields :
        hardware_type           : Hardware Type
        protocol_type           : Protocol Type
        hardware_size           : Hardware Size
        protocol_size           : Protocol Size
        opcode                  : Op Code
        sender_hardware_addr    : Sender Hardware Address
        sender_proto_addr       : Sender Protocol Address
        target_hardware_addr    : Target Hardware Address
        target_proto_addr       : Target Protocol Address
    """
    # FIXME: We may want to split this into RARP and ARP classes.
    # FIXME: ARP is a strange case, in as much as its supposed to be
    # general accross hardware and protocol types, but in practice it
    # almost exclusively deals with Ethernet and IP. Perhaps we're letting a
    # foolish generality be the hobgoblin of our small minds here?
    TYPE = "ARP"
    _SIZEHINT = 28
    _OpcodeOptions = Options(
        ARP_REQUEST = 1,
        ARP_REPLY = 2,
        RARP_REQUEST = 3,
        RARP_REPLY = 4
    )
    hardware_type = IntField(0, 2)
    protocol_type = IntField(2, 2)
    hardware_size = IntField(4, 1)
    protocol_size = IntField(5, 1)
    opcode = IntField(6, 2, options = _OpcodeOptions)
    # We defer the choice of hardware and protocol descriptor types to the
    # instance.
    sender_hardware_addr = DescriptorProxy("_sender_hardware_addr")
    sender_proto_addr = DescriptorProxy("_sender_proto_addr")
    target_hardware_addr = DescriptorProxy("_target_hardware_addr")
    target_proto_addr = DescriptorProxy("_target_proto_addr")
    def __init__(self, *args):
        Protocol.__init__(self, *args)
        # Offsets below assume Ethernet (6-byte) + IPv4 (4-byte) addresses.
        self._sender_hardware_addr = EthernetAddress(8)
        self._target_hardware_addr = EthernetAddress(18)
        self._sender_proto_addr = IPAddress(14)
        self._target_proto_addr = IPAddress(24)
    def _selfConstruct(self):
        # Defaults: Ethernet hardware, IPv4 protocol, ARP request.
        self.hardware_type = 1
        self.protocol_type = 0x800
        self.hardware_size = 6
        self.protocol_size = 4
        self.opcode = "arp_request"
    def __repr__(self):
        atype = self._OpcodeOptions.toStr(self.opcode)
        return "%s: %s->%s"%(atype, self.sender_hardware_addr, self.target_hardware_addr)
class IPv6(Protocol):
    """
    IPv6 (RFC 2460).
        version         : Version
        diffservices    : Differentiated Services
        flowlabel       : Flow Label
        payloadlength   : Payload Length
        nextheader      : Next Header
        hoplimit        : Hop Limit
        src             : Source Address
        dst             : Destination Address
    """
    TYPE = "IPv6"
    _SIZEHINT = 40
    # Protocols
    version = BitField(0, 0, 4)
    diffservices = BitField(0, 4, 8)
    flowlabel = BitField(1, 4, 20)
    payloadlength = IntField(4, 2)
    nextheader = IntField(6, 1, options=ProtocolOptions)
    hoplimit = IntField(7, 1)
    src = IPv6Address(8)
    dst = IPv6Address(24)
    payload = Payload()
    def _selfConstruct(self):
        if self._next:
            # We make a horrid exception for ICMP6 - ICMP6 can be any of a number of classes...
            if isinstance(self._next, ICMP6Base):
                self.nextheader = "ICMP6"
            else:
                self.nextheader = IP6_PROTO_JUMPER.get(self._next.__class__, 0)
        self.version = 6
        self.hoplimit = 255
        self.diffservices = 0
    def _constructNext(self):
        try:
            if IP6_PROTO_JUMPER.has_key(self.nextheader):
                self._addProtocol(IP6_PROTO_JUMPER[self.nextheader], 40)
        except DataBoundsError:
            # If our data length is too short, we simply don't consruct the
            # next proto...
            pass
    def _getPayloadOffsets(self):
        # Fixed 40-byte header; payloadlength covers everything after it.
        return 40, self.payloadlength
    def __repr__(self):
        return "IPv6: %s->%s"%(self.src, self.dst)
    def _getPseudoHeader(self):
        # NOTE(review): the next-header byte is hard-coded to 58 (ICMPv6),
        # so this pseudo-header is only valid for ICMPv6 checksums.
        phdr = [
            self._getByteField(8, 16),
            self._getByteField(24, 16),
            utils.multichar(self.payloadlength, 4),
            "\0\0\0", chr(58)
        ]
        return "".join(phdr)
    def _fixDimensions(self):
        self.payloadlength = len(self.packet) - self.offset - 40
class IPv6HopByHopHeader(Protocol):
    """
    IPv6 Hop by Hop Header (stub — field parsing not implemented).
    """
    TYPE = "IPv6HopByHopHeader"
    _SIZEHINT = 2
class IPv6RoutingHeader(Protocol):
    """
    IPv6 Routing Header (stub — field parsing not implemented).
    """
    TYPE = "IPv6RoutingHeader"
    _SIZEHINT = 2
class IPv6FragmentHeader(Protocol):
    """
    IPv6 Fragment Header (stub — field parsing not implemented).
    """
    TYPE = "IPv6FragmentHeader"
    _SIZEHINT = 2
class IPv6DestinationOptionsHeader(Protocol):
    """
    IPv6 Destination Options Header (stub — field parsing not implemented).
    """
    TYPE = "IPv6DestinationOptions"
    _SIZEHINT = 2
class AH(Protocol):
    """
    IPsec Authentication Header (RFC 4302).
    """
    TYPE = "AH"
    # NOTE(review): _SIZEHINT = 2 is smaller than the 12 fixed bytes of
    # fields declared below — confirm.
    _SIZEHINT = 2
    nextheader = IntField(0, 1, options=ProtocolOptions)
    length = IntField(1, 1, "Length of the header in 32-bit words, minus 2")
    reserved = IntField(2, 2)
    spi = IntField(4, 4)
    sequence = IntField(8, 4)
    icv = DescriptorProxy("_icv")
    payload = Payload()
    def __init__(self, *args):
        Protocol.__init__(self, *args)
        # Integrity Check Value occupies the header after the 12 fixed bytes.
        self._icv = ByteField(12, ((self.length + 2) * 4) - 12)
    def _getPayloadOffsets(self):
        offset = (self.length + 2) * 4
        dataLength = len(self.packet) - self.offset - offset
        return offset, dataLength
    def _constructNext(self):
        # NOTE(review): the next protocol is attached at a fixed offset of
        # 40, not at this header's actual length (self.length + 2) * 4 —
        # looks copied from IPv6._constructNext; confirm.
        if IP6_PROTO_JUMPER.has_key(self.nextheader):
            self._addProtocol(IP6_PROTO_JUMPER[self.nextheader], 40)
class ESP(Protocol):
    """
    IPsec Encapsulating Security Payload (RFC 4303).
    """
    TYPE = "ESP"
    _SIZEHINT = 2
    spi = IntField(0, 4)
    sequence = IntField(4, 4)
    payload = Payload()
    def _getPayloadOffsets(self):
        # Everything after SPI + sequence, sized from the enclosing
        # protocol's payload (the ESP body is opaque/encrypted).
        offset = 8
        dataLength = len(self._prev.payload) - 8
        return offset, dataLength
class ICMP6Base(Protocol):
    """
    Base Class for ICMP6 (RFC 4443).

    Fields :
        icmp6type   : Type of ICMP6 Packet
        code        : Code for description of packet purpose
        checksum    : Checksum
    """
    # FIXME: There are more types than just these.
    TYPE = "ICMP6"
    _SIZEHINT = 8
    _TypeOptions = Options(
        DESTINATION_UNREACHABLE = 1,
        PACKET_TOO_BIG = 2,
        TIME_EXCEEDED = 3,
        PARAMETER_PROBLEM = 4,
        ECHO_REQUEST = 128,
        ECHO_REPLY = 129,
        MULTICAST_LISTENER_QUERY = 130,
        MULTICAST_LISTENER_REPORT = 131,
        MULTICAST_LISTENER_DONE = 132,
        ROUTER_SOLICITATION = 133,
        ROUTER_ADVERTISEMENT = 134,
        NEIGHBOUR_SOLICITATION = 135,
        NEIGHBOUR_ADVERTISEMENT = 136,
        REDIRECT = 137,
        ROUTER_RENUMBERING = 138,
        NODE_INFO_QUERY = 139,
        NODE_INFO_RESPONSE = 140,
        INVERSE_ND_SOLICITATION = 141,
        INVERSE_ND_ADV = 142
    )
    icmp6type = IntField(0, 1, options=_TypeOptions)
    code = IntField(1, 1)
    checksum = IntField(2, 2)
    payload = Payload()
    def _fixChecksums(self):
        # ICMPv6 checksum covers the IPv6 pseudo-header plus the whole
        # ICMPv6 message (the enclosing header's payload).
        self.checksum = 0
        self.checksum = utils.cksum16(
            self._prev._getPseudoHeader() +
            self._prev.payload
        )
    def _getPayloadOffsets(self):
        """
        The default implementation for ICMPv6 gives the entire message body
        as payload.
        """
        offset = 4
        dataLength = len(self.packet) - self.offset - 4
        return offset, dataLength
class ICMP6DestinationUnreachable(ICMP6Base):
    """
    ICMP6 Destination Unreachable
        icmp6type   : Type of ICMP6 Packet
        code        : Code for description of packet purpose
        checksum    : Checksum
        ip6hdr      : The quoted (offending) IPv6 header
    """
    TYPE = "ICMP6DestinationUnreachable"
    unused = ByteField(4, 4)
    # BUG FIX: the original signature was (self, *args) but the body then
    # referenced the unbound names "packet" and "offset", raising NameError
    # on every instantiation. Name the parameters explicitly (matching
    # Protocol.__init__'s positional order) as _ICMPWithIPHdr does.
    def __init__(self, packet, offset=0, *args):
        ICMP6Base.__init__(self, packet, offset, *args)
        self.ip6hdr = IPv6(packet, offset+8)
    def __repr__(self):
        return "ICMPv6 Destination Unreachable"
class ICMP6PacketTooBig(ICMP6Base):
    """
    ICMP6 Packet Too Big
        mtu     : MTU of the constricting link
        ip6hdr  : The quoted (offending) IPv6 header
    """
    TYPE = "ICMP6PacketTooBig"
    mtu = ByteField(4, 4)
    # BUG FIX: as with ICMP6DestinationUnreachable, the original (*args)
    # signature left "packet"/"offset" unbound, raising NameError.
    def __init__(self, packet, offset=0, *args):
        ICMP6Base.__init__(self, packet, offset, *args)
        self.ip6hdr = IPv6(packet, offset+8)
    def __repr__(self):
        return "ICMPv6 Packet Too Big"
class ICMP6TimeExceeded(ICMP6Base):
    """
    ICMP6 Time Exceeded
        ip6hdr  : The quoted (offending) IPv6 header
    """
    TYPE = "ICMP6TimeExceeded"
    unused = ByteField(4, 4)
    # BUG FIX: as with ICMP6DestinationUnreachable, the original (*args)
    # signature left "packet"/"offset" unbound, raising NameError.
    def __init__(self, packet, offset=0, *args):
        ICMP6Base.__init__(self, packet, offset, *args)
        self.ip6hdr = IPv6(packet, offset+8)
    def __repr__(self):
        return "ICMPv6 Time Exceeded"
class ICMP6ParameterProblem(ICMP6Base):
    """
    ICMP6 Parameter Problem
        pointer : Byte offset of the problem within the offending packet
    """
    TYPE = "ICMP6ParameterProblem"
    pointer = IntField(4, 4)
    payload = Payload()
    def __repr__(self):
        return "ICMPv6 Parameter Problem"
class _ICMP6EchoBase(ICMP6Base):
    """
    ICMP6 Echo Base Class
    """
    identifier = IntField(4, 2)
    seq_num = IntField(6, 2)
    def __init__(self, packet, offset):
        ICMP6Base.__init__(self, packet, offset)
        # NOTE(review): echo messages carry arbitrary data, not an IPv6
        # header — parsing ip6hdr here looks copied from the error-message
        # classes; confirm intent.
        self.ip6hdr = IPv6(packet, offset+8)
class ICMP6EchoRequest(_ICMP6EchoBase):
    """
    ICMP6 Echo Request
        identifier  : Identifier Number
        seq_num     : Sequence Number
        ip6hdr      : IPv6 Header
    """
    TYPE = "ICMP6EchoRequest"
    def __repr__(self):
        return "ICMPv6 Echo Request"
class ICMP6EchoReply(_ICMP6EchoBase):
    """
    ICMP6 Echo Reply
        identifier  : Identifier Number
        seq_num     : Sequence Number
        ip6hdr      : IPv6 Header
    """
    TYPE = "ICMP6EchoReply"
    def __repr__(self):
        return "ICMPv6 Echo Reply"
class ICMP6NeighbourBase(ICMP6Base):
    """
    ICMP6 Neighbour Base Class (RFC 4861 neighbour discovery)
        target_addr : Target Address
        options     : Contains a Link Layer Address
    """
    target_addr = IPv6Address(8)
    options = DescriptorProxy("_options_addr")
    def __init__(self, *args):
        Protocol.__init__(self, *args)
        # FIXME: Implement more hardware and protocol types. At the moment we
        # assume that we're talking about IPv6 over Ethernet.
        # Byte 26 = after the 2-byte option type/length header at 24-25.
        self._options_addr = EthernetAddress(26)
class ICMP6NeighbourSolicitation(ICMP6NeighbourBase):
    """
    ICMP6 Neighbour Solicitation
        target_addr : Target Address
        options     : Contains a Link Layer Address
    """
    TYPE = "ICMP6NeighbourSolicitation"
    def __repr__(self):
        return "ICMPv6 Neighbour Solicitation"
class ICMP6NeighbourAdvertisement(ICMP6NeighbourBase):
    """
    ICMP6 Neighbour Advertisement (answers a solicitation)

        target_addr : Target Address
        options     : Contains a Link Layer Address
        flags       : OVERRIDE / SOLICITED / ROUTER bits
    """
    TYPE = "ICMP6NeighbourAdvertisement"
    _FlagsOptions = Options(
        OVERRIDE = 1,
        SOLICITED = 2,
        ROUTER = 4
    )
    # Fields
    flags = FlagsField(4, 0, 3, options=_FlagsOptions)
    def __repr__(self):
        return "ICMPv6 Neighbour Advertisement"
# Maps an ICMPv6 type code to its concrete dissector class.  Commented-out
# entries are message types with no dissector implemented yet; the ICMP6()
# factory below falls back to a plain ICMP6Base for those.
_ICMP6Jumptable = {
    ICMP6Base._TypeOptions["DESTINATION_UNREACHABLE"]: ICMP6DestinationUnreachable,
    ICMP6Base._TypeOptions["PACKET_TOO_BIG"]: ICMP6PacketTooBig,
    ICMP6Base._TypeOptions["TIME_EXCEEDED"]: ICMP6TimeExceeded,
    ICMP6Base._TypeOptions["PARAMETER_PROBLEM"]: ICMP6ParameterProblem,
    ICMP6Base._TypeOptions["ECHO_REQUEST"]: ICMP6EchoRequest,
    ICMP6Base._TypeOptions["ECHO_REPLY"]: ICMP6EchoReply,
    #ICMP6Base._TypeOptions["MULTICAST_LISTENER_QUERY"]:
    #ICMP6Base._TypeOptions["MULTICAST_LISTENER_REPORT"]:
    #ICMP6Base._TypeOptions["MULTICAST_LISTENER_DONE"]:
    #ICMP6Base._TypeOptions["ROUTER_SOLICITATION"]:
    #ICMP6Base._TypeOptions["ROUTER_ADVERTISEMENT"]:
    ICMP6Base._TypeOptions["NEIGHBOUR_SOLICITATION"]: ICMP6NeighbourSolicitation,
    ICMP6Base._TypeOptions["NEIGHBOUR_ADVERTISEMENT"]: ICMP6NeighbourAdvertisement,
    #ICMP6Base._TypeOptions["REDIRECT"]:
    #ICMP6Base._TypeOptions["ROUTER_RENUMBERING"]:
    #ICMP6Base._TypeOptions["NODE_INFO_QUERY"]:
    #ICMP6Base._TypeOptions["NODE_INFO_RESPONSE"]:
    #ICMP6Base._TypeOptions["INVERSE_ND_SOLICITATION"]:
    #ICMP6Base._TypeOptions["INVERSE_ND_ADV"]:
    #ICMP6Base._TypeOptions["INVERSE_ND_ADV"]:
}
def ICMP6(*args):
    """
    ICMP6 Factory Function.

    Parses the common ICMPv6 header first; when a dedicated dissector is
    registered for the message's type code, the data is re-parsed with
    that class, otherwise the generic header object is returned as-is.
    """
    stub = ICMP6Base(*args)
    specific = _ICMP6Jumptable.get(stub.icmp6type)
    if specific is None:
        return stub
    return specific(*args)
class Ethernet(Protocol):
    """
    Ethernet frame header.

        Fields :
            dst     : Destination Address
            src     : Source Address
            etype   : Type of encapsulated protocol (EtherType)
            length  : Length of header (fixed, 14 bytes)
    """
    TYPE = "Ethernet"
    _SIZEHINT = 14
    # Well-known EtherType values.
    TypeOptions = Options(
        IP = 0x800,
        ARP = 0x806,
        RARP = 0x8035,
        IP6 = 0x86DD,
        PPPOE = 0x8864,
        LOOPBACK = 0x9000
    )
    # Two-way mapping EtherType <-> dissector class; used when parsing
    # (etype -> class) and when building packets (class -> etype).
    TYPE_JUMPER = utils.DoubleAssociation(
        {
            TypeOptions["IP"]: IP,
            TypeOptions["ARP"]: ARP,
            TypeOptions["RARP"]: ARP,
            TypeOptions["IP6"]: IPv6,
        }
    )
    # Fields
    dst = EthernetAddress(0)
    src = EthernetAddress(6)
    etype = IntField(12, 2, options=TypeOptions)
    length = 14
    payload = Payload()
    def _selfConstruct(self):
        # Building a frame: derive etype from the class of the next
        # protocol in the chain (0 if it has no known EtherType).
        if self._next:
            self.etype = self.TYPE_JUMPER.get(self._next.__class__, 0)
    def _constructNext(self):
        # Parsing: hand the rest of the frame to the dissector for etype.
        if self.TYPE_JUMPER.has_key(self.etype):
            self._addProtocol(self.TYPE_JUMPER[self.etype], self.length)
    def _getPayloadOffsets(self):
        # Payload is everything after the fixed 14-byte header.
        return self.length, len(self.packet) - self.length
    def __repr__(self):
        return "Eth: %s->%s"%(self.src, self.dst)
class _PFBase(Protocol):
    """
    OpenBSD Specific.
    PF Logs.

    Shared constant tables for the old and current pflog header formats.
    All numeric values come from the platform headers via _sysvar, so
    they track the running kernel's definitions.
    """
    TYPE="PF"
    # Reasons a packet was logged (PFRES_* codes).
    ReasonOptions = Options(
        match = _sysvar.PFRES_MATCH,
        badoff = _sysvar.PFRES_BADOFF,
        frag = _sysvar.PFRES_FRAG,
        short = _sysvar.PFRES_SHORT,
        norm = _sysvar.PFRES_NORM,
        memory = _sysvar.PFRES_MEMORY,
        tstamp = _sysvar.PFRES_TS,
        congest = _sysvar.PFRES_CONGEST,
        ipoption = _sysvar.PFRES_IPOPTIONS,
        protocksum = _sysvar.PFRES_PROTCKSUM,
        state = _sysvar.PFRES_BADSTATE,
        stateins = _sysvar.PFRES_STATEINS,
        maxstates = _sysvar.PFRES_MAXSTATES,
        srclimit = _sysvar.PFRES_SRCLIMIT,
        synproxy = _sysvar.PFRES_SYNPROXY
    )
    # Actions taken by PF (PFACT_* codes).
    ActionOptions = Options(
        drop = _sysvar.PFACT_DROP,
        scrub = _sysvar.PFACT_SCRUB,
        nat = _sysvar.PFACT_NAT,
        nonat = _sysvar.PFACT_NONAT,
        binat = _sysvar.PFACT_BINAT,
        nobinat = _sysvar.PFACT_NOBINAT,
        rdr = _sysvar.PFACT_RDR,
        nordr = _sysvar.PFACT_NORDR,
        synproxy_drop = _sysvar.PFACT_SYNPROXY_DROP,
        # Ugly magic used because "pass" is a Python keyword.
        **{ "pass" : _sysvar.PFACT_PASS}
    )
    # Directions
    DirectionOptions = Options(
        inout = _sysvar.PFDIR_INOUT,
        out = _sysvar.PFDIR_OUT,
        # Ugly magic used because "in" is a Python keyword.
        **{ "in" : _sysvar.PFDIR_IN}
    )
    # SA Family Values (socket address families).
    SAFamilyOptions = Options(
        UNSPEC = 0,
        LOCAL = 1,
        INET = 2,
        APPLETALK = 16,
        LINK = 18,
        INET6 = 24,
        ENCAP = 28
    )
class PFOld(_PFBase):
    """Dissector for the legacy (pre-3.4) pflog header format."""
    TYPE = "PFOld"
    _SIZEHINT = _sysvar.IFNAMSIZ + 12
    # Fields
    safamily = IntField(0, 4, options=_PFBase.SAFamilyOptions)
    ifname = PaddedString(4, _sysvar.IFNAMSIZ)
    ruleno = IntField(4+_sysvar.IFNAMSIZ, 2)
    reason = IntField(4+_sysvar.IFNAMSIZ+2, 2, options=_PFBase.ReasonOptions)
    action = IntField(4+_sysvar.IFNAMSIZ+4, 2, options=_PFBase.ActionOptions)
    direction = IntField(
        4+_sysvar.IFNAMSIZ+6,
        2,
        options=_PFBase.DirectionOptions
    )
    length = _sysvar.IFNAMSIZ + 12
    payload = Payload()
    def _constructNext(self):
        # The logged packet follows the header; dissect it according to
        # its socket address family (e.g. INET -> IP, INET6 -> IPv6).
        if AF_JUMPER.has_key(self.safamily):
            self._addProtocol(AF_JUMPER[self.safamily], self.length)
    def _getPayloadOffsets(self):
        offset = self.length
        dataLength = len(self.packet._data) - offset
        return offset, dataLength
    def __repr__(self):
        # Decode the numeric codes back into their symbolic names.
        reason = self.ReasonOptions.toStr(self.reason)
        action = self.ActionOptions.toStr(self.action)
        direction = self.DirectionOptions.toStr(self.direction)
        return "Old PF rule %s (%s) %s %s on %s"%(
            self.ruleno,
            reason,
            action,
            direction,
            self.ifname
        )
class PF(_PFBase):
    """
    OpenBSD Specific : PF

    Dissector for the current pflog header format (struct pfloghdr).
    """
    _SIZEHINT = _sysvar.IFNAMSIZ + _sysvar.PF_RULESET_NAME_SIZE + 16
    TYPE = "PF"
    # Fields
    length = IntField(0, 1) # Minus padding
    safamily = IntField(1, 1, options=_PFBase.SAFamilyOptions)
    action = IntField(2, 1, options=_PFBase.ActionOptions)
    reason = IntField(3, 1, options=_PFBase.ReasonOptions)
    ifname = PaddedString(4, _sysvar.IFNAMSIZ)
    ruleset = PaddedString(4 + _sysvar.IFNAMSIZ, _sysvar.PF_RULESET_NAME_SIZE)
    rulenr = IntField(4 + _sysvar.IFNAMSIZ + _sysvar.PF_RULESET_NAME_SIZE, 4)
    # Note: if subrulenumber == ((1 << 32) -1), there is no subrule.
    subrulenr = IntField(8 + _sysvar.IFNAMSIZ + _sysvar.PF_RULESET_NAME_SIZE, 4)
    direction = IntField(
        12 + _sysvar.IFNAMSIZ + _sysvar.PF_RULESET_NAME_SIZE,
        1, options=_PFBase.DirectionOptions
    )
    pad = ByteField(13 + _sysvar.IFNAMSIZ + _sysvar.PF_RULESET_NAME_SIZE, 3)
    payload = Payload()
    def _constructNext(self):
        # The logged packet starts after the header: the `length` field
        # excludes the 3 padding bytes, hence the + 3.
        if AF_JUMPER.has_key(self.safamily):
            self._addProtocol(AF_JUMPER[self.safamily], self.length + 3)
    def _getPayloadOffsets(self):
        offset = self.length + 3
        dataLength = len(self.packet._data) - offset
        return offset, dataLength
    def __repr__(self):
        reason = self.ReasonOptions.toStr(self.reason)
        action = self.ActionOptions.toStr(self.action)
        direction = self.DirectionOptions.toStr(self.direction)
        # All-ones subrule number means "no subrule"; display it as 0.
        if self.subrulenr == ((1 << 32) - 1):
            subrulenr = 0
        else:
            subrulenr = self.subrulenr
        return "PF rule %s/%s (%s) %s %s on %s"%(
            self.rulenr,
            subrulenr,
            reason,
            action,
            direction,
            self.ifname
        )
class Enc(Protocol):
    """
    OpenBSD Specific.
    Encapsulating Interface Protocol (enc0 IPsec loopback header).

        addressFamily : address family of the inner packet (host byte order)
        spi           : IPsec Security Parameter Index
        flags         : CONF / AUTH / AUTH_AH processing flags
    """
    _SIZEHINT = 12
    _FlagsOptions = Options(
        CONF = 0x0400,
        AUTH = 0x0800,
        AUTH_AH = 0x2000
    )
    TYPE = "Enc"
    addressFamily = HOInt32Field(0)
    spi = IntField(4, 4)
    flags = HOInt32FlagsField(8, options=_FlagsOptions)
    def _constructNext(self):
        if AF_JUMPER.has_key(self.addressFamily):
            # See print_enc.c in tcpdump - it chickens out by simply assuming
            # that the next protocol in the chain is IP. We do the same,
            # because the address family and flags are stored in host byte
            # order, and we don't have any way of telling what "host byte
            # order" is from here if we're reading from a pcap dump file...
            #self._addProtocol(AF_JUMPER[self.addressFamily], 12)
            self._addProtocol(AF_JUMPER[2], 12)
    def __repr__(self):
        # List the symbolic names of all set flag bits.
        options = []
        for i in self._FlagsOptions.keys():
            if self.flags & self._FlagsOptions[i]:
                options.append(i)
        return "Enc (%s)"%",".join(options)
class Loopback(Protocol):
    """
    The NULL header at the head of packets found on the loopback interface.

        length        : Length (fixed, 4 bytes)
        addressFamily : Address Family of the encapsulated packet
    """
    TYPE = "Loopback"
    _SIZEHINT = 4
    # AF Families
    AFOptions = Options(
        UNSPEC = 0,
        LOCAL = 1,
        INET = 2,
        APPLETALK = 16,
        LINK = 18,
        INET6 = 24,
        ENCAP = 28
    )
    # Fields
    length = 4
    addressFamily = IntField(0, 4, options=AFOptions)
    payload = Payload()
    def _constructNext(self):
        # Parsing: dispatch on the address family.
        if AF_JUMPER.has_key(self.addressFamily):
            self._addProtocol(AF_JUMPER[self.addressFamily], self.length)
    def _getPayloadOffsets(self):
        return self.length, len(self.packet) - self.length
    def _selfConstruct(self):
        # Building: derive the address family from the next protocol's class.
        # NOTE(review): the key is registered above as "UNSPEC" — confirm
        # Options lookup is case-insensitive, otherwise "unspec" raises.
        # Also assumes self._next is set when a packet is being built.
        self.addressFamily = AF_JUMPER.get(
            self._next.__class__, self.AFOptions["unspec"]
        )
    def __repr__(self):
        # Intentionally returns an empty string. We don't normally want to know
        # about loopback...
        return ""
# Two-way mapping: socket address-family number <-> dissector class.
AF_JUMPER = utils.DoubleAssociation(
    {
        1: Loopback,
        2: IP,
        24: IPv6,
    }
)
# IPv4 protocol-number -> dissector for the encapsulated payload.
IP4_PROTO_JUMPER = utils.DoubleAssociation(
    {
        ProtocolOptions["ICMP"]: ICMP,
        ProtocolOptions["IGMP"]: IGMP,
        ProtocolOptions["IP_IN_IP"]: IP,
        ProtocolOptions["TCP"]: TCP,
        ProtocolOptions["UDP"]: UDP,
        ProtocolOptions["AH"]: AH,
        ProtocolOptions["ESP"]: ESP
    }
)
# IPv6 next-header number -> dissector (includes extension headers).
IP6_PROTO_JUMPER = utils.DoubleAssociation(
    {
        ProtocolOptions["ICMP"]: ICMP,
        ProtocolOptions["IGMP"]: IGMP,
        ProtocolOptions["IP_IN_IP"]: IP,
        ProtocolOptions["TCP"]: TCP,
        ProtocolOptions["UDP"]: UDP,
        ProtocolOptions["IPv6"]: IPv6,
        ProtocolOptions["ROUTING_HEADER"]: IPv6RoutingHeader,
        ProtocolOptions["FRAGMENTATION_HEADER"]: IPv6FragmentHeader,
        ProtocolOptions["ESP"]: ESP,
        ProtocolOptions["AH"]: AH,
        ProtocolOptions["ICMP6"]: ICMP6,
        ProtocolOptions["DESTINATION_OPTIONS_HEADER"]: IPv6DestinationOptionsHeader,
    }
)
|
UTF-8
|
Python
| false | false | 2,009 |
19,146,964,248,522 |
10fb23bc83294a8a4be5fcb83cbd13e45138fc58
|
10389b92fc3fd4fd724cdc437595b25997d779e9
|
/oioioi/participants/models.py
|
de2372e3b93f7e10d09d0ffa8550338334e633e3
|
[
"GPL-3.0-only"
] |
non_permissive
|
CesarChodun/OIOIOI_site_developement_clone
|
https://github.com/CesarChodun/OIOIOI_site_developement_clone
|
661993676ce2f8c82f46a3be5ff2e5edfed67895
|
7f82fb16c9df658420375b7d2cfbcf56a78ceeaf
|
refs/heads/master
| 2020-08-27T07:13:03.177901 | 2014-03-17T12:31:59 | 2014-03-17T17:44:22 | 217,280,814 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from oioioi.base.fields import EnumRegistry, EnumField
from oioioi.base.utils.deps import check_django_app_dependencies
from oioioi.contests.models import Contest
from oioioi.participants.fields import \
OneToOneBothHandsCascadingParticipantField
# The participants app relies on contest-exclusiveness support being installed.
check_django_app_dependencies(__name__, ['oioioi.contestexcl'])

# Registry of the values Participant.status may take.
participant_statuses = EnumRegistry()
participant_statuses.register('ACTIVE', _("Active"))
participant_statuses.register('BANNED', _("Banned"))
class Participant(models.Model):
    """A user registered in a contest, with an activity status."""
    contest = models.ForeignKey(Contest)
    user = models.ForeignKey(User)
    # One of the keys registered in participant_statuses ('ACTIVE'/'BANNED').
    status = EnumField(participant_statuses, default='ACTIVE')
    @property
    def registration_model(self):
        """Return the registration-data instance attached to this participant.

        The concrete model class is supplied by the contest's registration
        controller.  Raises ObjectDoesNotExist when the controller declares
        no model class, or when no instance exists for this participant.
        """
        rcontroller = self.contest.controller.registration_controller()
        model_class = rcontroller.get_model_class()
        if model_class is None:
            raise ObjectDoesNotExist
        try:
            return model_class.objects.get(participant=self)
        except model_class.DoesNotExist:
            # Re-raise the generic exception so callers need not know the
            # controller-specific model class.
            raise ObjectDoesNotExist
    class Meta(object):
        # A user may be registered at most once per contest.
        unique_together = ('contest', 'user')
    def __unicode__(self):
        return unicode(self.user)
class RegistrationModel(models.Model):
    """Abstract base for per-contest registration data.

    Concrete subclasses attach extra registration fields to a Participant;
    the one-to-one field cascades deletes in both directions.
    """
    participant = OneToOneBothHandsCascadingParticipantField(Participant,
        related_name='%(app_label)s_%(class)s')
    class Meta(object):
        abstract = True
class TestRegistration(RegistrationModel):
    """Used only for testing"""
    name = models.CharField(max_length=255)
|
UTF-8
|
Python
| false | false | 2,014 |
14,869,176,817,745 |
1ab133a329bfa676d297370bb3fddb7b48575fd5
|
d864592b0b746fe9e26a07215b5b518c5a199242
|
/mrpi2.py
|
237de08d5d594b2d801a31b60018ab4af2c4b29d
|
[] |
no_license
|
msemik/python-paralellization-experiments
|
https://github.com/msemik/python-paralellization-experiments
|
be3204cce947f483b1df70e70de983974551f10d
|
f4e0cd4b2253d96c0db9ab7d9e627fe49bf540b4
|
refs/heads/master
| 2018-02-20T10:20:10.291271 | 2011-11-16T22:00:47 | 2011-11-16T22:00:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from random import random
from math import pi
from multiprocessing import Pool
from time import time
# Assume radius equals 1 (Monte-Carlo estimate of pi via the unit quarter-circle).
num_of_points = 1000000
num_of_workers = 2
# Floor division keeps this an int under both Python 2 and 3; plain "/"
# would produce a float on Python 3 and break xrange/range below.
points_per_worker = num_of_points // num_of_workers
# Equal shares per worker, so the total actually sampled can be slightly
# less than num_of_points when the split is not exact.
real_number_of_points = points_per_worker * num_of_workers
def perform_an_experiment(experiment_num_of_points):
    """Count how many of N uniformly random points in the unit square fall
    inside the quarter unit circle (x^2 + y^2 <= 1).

    Returns an int in [0, experiment_num_of_points].

    Uses range() instead of the Python-2-only xrange(): iteration behaviour
    is identical on Python 2 and the helper also runs under Python 3.
    """
    def in_circle(point):
        # Inside (or on) the unit circle centred at the origin.
        return 1 >= point[0] ** 2 + point[1] ** 2
    return sum(
        in_circle((random(), random()))
        for _ in range(experiment_num_of_points)
    )
if __name__ == "__main__":
    # Fan the sampling out over a pool of worker processes, one equal
    # share of points per worker, then combine the hit counts.
    workers = Pool(processes=num_of_workers)
    start = time()
    num_of_points_in_circle = workers.map(perform_an_experiment,
        (points_per_worker for i in xrange(num_of_workers)))
    # Sum the per-worker hit counts into a single total.
    num_of_points_in_circle = reduce(lambda x,y: x+y, num_of_points_in_circle)
    # hits/total approximates the quarter-circle area pi/4, hence * 4.
    mypi = float(num_of_points_in_circle) / float(real_number_of_points) * 4.0
    end = time()
    print "calculated value of pi: %f" % mypi
    print "done in %f seconds" % (end-start)
    print "pi value from math lib: %f" % pi
|
UTF-8
|
Python
| false | false | 2,011 |
7,876,970,061,008 |
8ef90684ab61a5b0b278b61e5a388ef25ec524df
|
9211b11bb0fa16dc895d3af17932166e61dde58c
|
/agent/modules/ganglia_c_module.py
|
5e9d68cd799d98bcb0d252ddcd7abaec1d171cca
|
[] |
no_license
|
renren/peach
|
https://github.com/renren/peach
|
9bc89ade51cdbc2d8fa396539347ad24b1eb166b
|
01542cde8c19bc1fff088e2385976c05d53dd585
|
refs/heads/master
| 2020-12-25T03:41:28.392168 | 2012-03-27T11:05:15 | 2012-03-27T11:05:15 | 3,343,941 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import ctypes, os
import logging
from lazy import memoized
class foo(ctypes.Structure):
    """Python mirror of the C `struct foo` exported by foo.so."""
    # Field order and types must match the C definition exactly.
    _fields_ = [
        ('v', ctypes.c_int),
        ('c', ctypes.c_char),
    ]
def test_hello():
    """Smoke-test for the demo shared library foo.so: exported struct,
    string-returning functions and their matching destructor."""
    m = ctypes.cdll.LoadLibrary('./foo.so')
    # export struct
    f = foo.in_dll(m, 'crash')
    print f.v, f.c
    # function
    hello = m.hello
    hello.argtypes = [ctypes.POINTER(ctypes.c_char_p), ctypes.c_int]
    hello.restype = ctypes.c_int
    p = ctypes.c_char_p(0)
    r = hello(ctypes.pointer(p), 7)
    print r, repr(p.value)
    # The C side allocated the string; free it with its own destructor.
    destory = m.destory
    destory.argtypes = [ctypes.c_char_p]
    destory(p)
    world = m.world
    world.argtypes = [ctypes.POINTER(ctypes.c_void_p), ctypes.c_int]
    world.restype = ctypes.c_int
    p = ctypes.c_void_p(0)
    r = world(ctypes.pointer(p), 7)
    print r, repr(p.value)
    # void* result must be re-cast to char* before handing it back to free.
    destory(ctypes.c_char_p(p.value))
class apr(object):
    """Thin ctypes wrapper around the subset of libapr-1 needed here:
    creating and destroying APR memory pools."""
    @classmethod
    def init(cls):
        # Resolve the libapr symbols once and cache them on the class.
        cls.mod = ctypes.cdll.LoadLibrary('libapr-1.so.0')
        cls.apr_pool_initialize = cls.mod.apr_pool_initialize
        cls.apr_pool_terminate = cls.mod.apr_pool_terminate
        cls.apr_pool_create = cls.mod.apr_pool_create_ex
        cls.apr_pool_create.argtypes = [ctypes.POINTER(ctypes.c_void_p), ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        cls.apr_pool_create.restype = ctypes.c_int
        #
        cls.apr_pool_destroy = cls.mod.apr_pool_destroy
        cls.apr_pool_destroy.argtypes = [ctypes.c_void_p]
    @classmethod
    def pool_create(cls):
        """Initialise APR and return a new top-level pool handle."""
        cls.apr_pool_initialize()
        nil = ctypes.c_void_p(0)
        pool = ctypes.c_void_p(0)
        # NOTE(review): the returned status code is ignored — confirm
        # callers tolerate a NULL pool handle on allocation failure.
        status = cls.apr_pool_create(ctypes.pointer(pool), nil, nil, nil)
        return pool
    @classmethod
    def pool_destroy(cls, pool):
        """Destroy a pool and shut APR down again."""
        cls.apr_pool_destroy(pool)
        cls.apr_pool_terminate()
def test_class_apr():
    """Round-trip the apr wrapper: init, create a pool, destroy it."""
    apr.init()
    p = apr.pool_create()
    apr.pool_destroy(p)
def test_apr():
    """Same round-trip as test_class_apr but with raw ctypes calls,
    printing the intermediate handles/status codes for inspection."""
    apr = ctypes.cdll.LoadLibrary('libapr-1.so.0')
    #
    apr_pool_initialize = apr.apr_pool_initialize
    apr_pool_initialize.restype = ctypes.c_int
    #
    apr_pool_terminate = apr.apr_pool_terminate
    #
    apr_pool_create = apr.apr_pool_create_ex
    apr_pool_create.argtypes = [ctypes.POINTER(ctypes.c_void_p), ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
    apr_pool_create.restype = ctypes.c_int
    #
    apr_pool_destroy = apr.apr_pool_destroy
    apr_pool_destroy.argtypes = [ctypes.c_void_p]
    #
    apr_pool_initialize()
    nil = ctypes.c_void_p(0)
    pool = ctypes.c_void_p(0)
    ppool = ctypes.pointer(pool)
    print pool, ppool.contents
    status = apr_pool_create(ppool, nil, nil, nil)
    print status, pool
    apr_pool_destroy(pool)
    apr_pool_terminate()
class Ganglia_25metric(ctypes.Structure):
    """ctypes mirror of Ganglia's Ganglia_25metric descriptor struct
    (one entry per metric exported by a module)."""
    _fields_ = [('key', ctypes.c_int),
                ('name', ctypes.c_char_p),
                ('tmax', ctypes.c_int),
                ('type', ctypes.c_int), # Ganglia_value_types
                ('units', ctypes.c_char_p),
                ('slope', ctypes.c_char_p),
                ('fmt', ctypes.c_char_p),
                ('msg_size', ctypes.c_int),
                ('desc', ctypes.c_char_p),
                ('metadata', ctypes.POINTER(ctypes.c_int))
                ]
    def __repr__(self):
        return "<metric> %s(%s) %s %d" % (self.name, self.desc,
                                          self.key, self.type)
MAX_G_STRING_SIZE = 32


class val(ctypes.Union):
    """ctypes mirror of Ganglia's g_val_t union: one slot per metric type."""
    _fields_ = [('int8', ctypes.c_char),
                ('int16', ctypes.c_int16),
                ('uint16', ctypes.c_uint16),
                ('int32', ctypes.c_int32),
                ('uint32', ctypes.c_uint32),
                ('f', ctypes.c_float),
                ('d', ctypes.c_double),
                ('str', ctypes.c_char * MAX_G_STRING_SIZE)
                ]

    def raw(self, type, fmt=None):
        """Return the union's value interpreted as Ganglia type code `type`.

        type : Ganglia_value_types code (1=string, 2..5=ints, 6=float, 7=double)
        fmt  : optional printf-style format; when given, numeric values are
               round-tripped through it (so e.g. "%.3f" limits precision).

        Raises ValueError for an unknown type code.
        """
        table = {1: 'str',
                 2: 'uint16',
                 3: 'int16',
                 4: 'uint32',
                 5: 'int32',
                 6: 'f',
                 7: 'd'}
        if type not in table:
            raise ValueError('type %r error' % type)
        v = getattr(self, table[type])
        if fmt:
            vf = fmt % v
            if type in (2, 3, 4, 5):
                v = int(vf)
            elif type in (6, 7):
                # BUG FIX: the original called float(v), silently discarding
                # the formatted string, so fmt never affected float/double
                # values.  Parse the formatted representation instead.
                v = float(vf)
        return v
MMODULE_MAGIC_COOKIE = 0x474D3331  # "GM31": marks a valid Ganglia 3.1 module.
class mmodule(ctypes.Structure):
    """ctypes mirror of Ganglia's mmodule struct, plus helpers to
    enumerate and sample the metrics a loaded module exports."""
    _fields_ = [('version', ctypes.c_int),
                ('minor_version', ctypes.c_int),
                ('name', ctypes.c_char_p),
                ('dynamic_load_handle', ctypes.c_void_p),
                ('module_name', ctypes.c_char_p), # None
                ('metric_name', ctypes.c_char_p), # None
                ('module_params', ctypes.c_char_p), # None
                ('module_params_list', ctypes.c_void_p), # None
                ('config_file', ctypes.c_void_p), # None
                ('next', ctypes.c_void_p), # TODO: cast
                ('magic', ctypes.c_ulong),
                ('init', ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)), #
                ('cleanup', ctypes.CFUNCTYPE(None)), #
                ('metrics_info', ctypes.POINTER(Ganglia_25metric)),
                ('handler', ctypes.CFUNCTYPE(val, ctypes.c_int))
                ]
    def __repr__(self):
        return '<mmodule> %s %r' % (self.name, self.metrics_info.contents)
    def metric(self, i):
        """Return (descriptor, handler) for the i-th exported metric."""
        assert self.metrics_info[i].name
        return (self.metrics_info[i], self.handler)
    def __len__(self):
        # The metrics_info array is NULL-name terminated, like argv.
        i = 0
        while self.metrics_info[i].name:
            i += 1
        return i
    def run(self, index):
        """Sample metric `index`; returns (name, value formatted per fmt)."""
        #if not hasattr(self, '_ml'):
        #    setattr(self, '_ml', self.build_metric_list())
        info, handler = self.metric(index)
        r = handler(index)
        #return info.name, info.fmt % r.raw(info.type)
        return info.name, r.raw(info.type, info.fmt)
@memoized
def global_pool():
    """Lazily create (once, via memoization) the shared APR pool handed
    to every Ganglia module's init()."""
    apr.init()
    return apr.pool_create()
def test_load(name = 'load_module', so = 'gmod32/modload.so', preload=None):
    """Load one Ganglia module shared object, init it, print every metric
    once, then let the module clean up.

    name    : exported mmodule symbol name
    so      : path to the module's .so file
    preload : optional library to load first (for symbol dependencies)
    """
    # 1 load
    if preload:
        g = ctypes.cdll.LoadLibrary(preload)
        print g.debug_msg
    mod = ctypes.cdll.LoadLibrary(so)
    mm = mmodule.in_dll(mod, name)
    print repr(mm)
    # 2 setup_metric_callbacks
    ret = mm.init(global_pool())
    assert ret == 0
    i = 0
    while True:
        print i, mm.metrics_info[i].name, mm.handler(i)
        i += 1
        # metrics_info is NULL-name terminated.
        if not mm.metrics_info[i].name:
            break
    print len(mm)
    # 3 run once
    for x in xrange(len(mm)):
        print mm.run(x)
    mm.cleanup()
class GangliaModule(object):
    """Loads the standard set of Ganglia metric modules and samples them
    all at once via run().

    Example of the nested result shape (one sub-dict per module):
        'sys': {'boottime': '1327799592',
                'machine_type': 'x86',
                'mtu': '1500',
                'os_name': 'Linux',
                'os_release': '3.0.0-14-generic',
                'sys_clock': '1327821240'}"""
    # Module symbol name -> shared-object filename, for the modules we load.
    DEFAULT_MODS = {
        'load_module': 'modload.so',
        'cpu_module': 'modcpu.so',
        'disk_module': 'moddisk.so',
        # 'example_module': 'modexample.so',
        'multicpu_module': 'modmulticpu.so',
        'proc_module': 'modproc.so',
        'net_module': 'modnet.so',
        'mem_module': 'modmem.so'
        # 'sys_module': 'modsys.so',
    }
    def __init__(self, path=None):
        """Load and init every DEFAULT_MODS module found under `path`.

        When path is None it is guessed from the interpreter's word size;
        modules that fail to load are logged and skipped.
        """
        self.mods = []
        if path is None:
            import platform;
            bits,_ = platform.architecture()
            path = 'gmod%s' % bits[:2]
            if bits == '64bit': # hack
                path = '/usr/lib64/ganglia'
            elif bits == '32bit':
                path = 'modules/gmod32'
        for name, so in self.DEFAULT_MODS.iteritems():
            try:
                mod = ctypes.cdll.LoadLibrary(os.path.join(path, so))
            except:
                logging.warning('module %s %s load failed', path, so)
                continue
            mm = mmodule.in_dll(mod, name)
            ret = mm.init(global_pool())
            assert ret == 0
            self.mods.append(mm)
    def __del__(self):
        # Give every loaded module a chance to release its resources.
        for mm in self.mods:
            mm.cleanup()
    def run(self):
        """Sample every metric of every loaded module; returns a dict of
        {short_module_name: {metric_name: value}}."""
        x = {}
        for m in self.mods:
            x.update(self.run_module(m))
        return x
    def run_module(self, mm):
        """Sample all metrics of one module (see run() for the shape)."""
        i = 0
        x = {}
        while not not mm.metrics_info[i].name:
            # TODO: try/except
            name, val = mm.run(i)
            x[name] = val
            i += 1
        def filename_to_name(fn):
            # e.g. "cpu_module" -> "cpu" (text between '_' and '.').
            return fn[1+fn.find('_'):fn.find('.')]
        return {filename_to_name(mm.name) : x}
if __name__ == '__main__':
    # Usage: script <module_symbol> <path/to/module.so>
    import sys
    test_load(sys.argv[1], sys.argv[2])
    # Then load the full default module set from the 64-bit system path
    # and dump one complete sample of every metric.
    ga = GangliaModule('/usr/lib64/ganglia') # /usr/lib64/ganglia/
    import pprint
    pprint.pprint(ga.run())
|
UTF-8
|
Python
| false | false | 2,012 |
18,030,272,736,074 |
ecddc1ae4543d1d6b210c2e897c36ce6164bcf6e
|
27a3411ab2c314fa82ee79a2d15b8b56aed38f80
|
/connectsynth/settings.py
|
648d403f04e4427394379c84b86a90cfbe87925f
|
[] |
no_license
|
asanoboy/connectsynth
|
https://github.com/asanoboy/connectsynth
|
0162d07b4eff59d685ab3894bbb29d9d3b012d9e
|
17dec63a9825b2ee8b786835232ee9cfc53f36b3
|
refs/heads/master
| 2020-12-25T19:15:49.903342 | 2012-12-23T03:15:22 | 2012-12-23T03:15:22 | 28,704,741 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from settings_base import *
import os

# Use S3 for uploaded media only when the full set of AWS credentials is
# present in the environment.  (`in os.environ` replaces the Python-2-only
# dict.has_key(); behaviour is identical.)
if ('AWS_ACCESS_KEY_ID' in os.environ
        and 'AWS_SECRET_ACCESS_KEY' in os.environ
        and 'AWS_STORAGE_BUCKET_NAME' in os.environ):
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

# DEBUG=1 in the environment selects development settings; anything else
# gets the hardened production configuration (collected static files,
# secure session cookies, forced SSL).
if os.environ.get("DEBUG") == "1":
    DEBUG = True
    TEMPLATE_DEBUG = DEBUG
else:
    STATIC_ROOT = os.path.join(BASE_DIR, 'static')
    SESSION_COOKIE_SECURE = True
    MIDDLEWARE_CLASSES += (
        "connectsynth.utils.ForceSslMiddleware",
    )
|
UTF-8
|
Python
| false | false | 2,012 |
16,415,365,013,455 |
a6441938c219b5000fad95036150e1401f324750
|
9af1661c005447fc922672224beb58877f698cc0
|
/hw1/prob3.py
|
ab3a262a6cdd29cba438e5e763d240eccf9887ec
|
[] |
no_license
|
ecnahc515/stats314
|
https://github.com/ecnahc515/stats314
|
04208a3818c6db30f8231babb16d12f08cfe2b94
|
d040fe79483eadc6996be3f5882a08eab0309f00
|
refs/heads/master
| 2020-04-08T22:18:02.428227 | 2013-01-19T07:58:56 | 2013-01-19T07:58:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from pylab import *
from matplotlib.backends.backend_pdf import PdfPages

tire_data = open('tire.txt', 'r')

# Two brands
brand_a, brand_b = [],[]

# Go through file, add column 1 to brand_a
# add column 2 to brand_b
for line in tire_data:
    line = line.strip()
    sline = line.split()
    try:
        brand_a.append(float(sline[0]))
        brand_b.append(float(sline[1]))
    except IndexError:
        # Skip blank or short lines (fewer than two columns).
        pass
tire_data.close()

both = [brand_a, brand_b]

# Collect all three plots into one multi-page PDF.
pdf = PdfPages('tires.pdf')

# Page 1: horizontal box plots of both brands (red squares for outliers).
figure()
boxplot(both, 0, 'rs', 0)
title('BoxPlot')
savefig(pdf, format='pdf')
close()

# Page 2: histogram for brand A.
figure()
hist(brand_a)
title('Histogram for Brand A')
pdf.savefig()
close()

# Page 3: histogram for brand B.
figure()
hist(brand_b)
title('Histogram for Brand B')
pdf.savefig()
close()

pdf.close()
|
UTF-8
|
Python
| false | false | 2,013 |
17,282,948,426,198 |
e42871cf25aa2a3e17eab034caf67ca38af27870
|
83e4bdfff3d52905481c8e556847a00c65b3fafb
|
/admin.py
|
b283d52d5021ec2887208a3e9fa5cc1856b222c7
|
[
"Unlicense"
] |
permissive
|
jbzdak/django-email-auth
|
https://github.com/jbzdak/django-email-auth
|
137aa39d485c2d77ffaf2494d10c40fc17a238f1
|
f3f4589d4769bf8a0c17ffe67f21035235f9d6a0
|
refs/heads/master
| 2020-08-23T16:08:39.609520 | 2012-11-18T19:10:24 | 2012-11-18T19:10:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'jb'

from django.contrib import admin
from django.contrib.auth.models import User

import admin_hacks

# Monkey-patch the already-registered stock User admin so the "add user"
# page uses our form (which also asks for an e-mail address).
UserModelAdmin = admin.site._registry[User]
UserModelAdmin.add_form = admin_hacks.UserAddForm
UserModelAdmin.add_fieldsets = (
    (None, {
        'classes': ('wide',),
        'fields': ('username', 'email', 'password1', 'password2')}
    ),
)
|
UTF-8
|
Python
| false | false | 2,012 |
13,357,348,296,778 |
b6f8fd42fc9668b7805e71977f945894590775b7
|
9d72ada271e6629432aac927ae1a72fcfac6f34b
|
/src/physics/simulator.py
|
0dcfe52709e8418eaed40afb21ce410074362bef
|
[
"GPL-1.0-or-later"
] |
non_permissive
|
isnotinvain/py-hurrr
|
https://github.com/isnotinvain/py-hurrr
|
78d4d4cfbb8e5e6cf49a34559d238c5d52e1b983
|
fb1c326cf5d91e6a42790c9452553c193c3a9565
|
refs/heads/master
| 2020-04-11T02:06:43.027049 | 2013-02-01T09:07:02 | 2013-02-01T09:07:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
==================================================================
hurrr by Alex Levenson
A 2D physics simulator powered by Box2D
NOTE:
method bodiesAtPoint modified from the Elements source (formerly: http://elements.linuxuser.at)
Can't seem to find the Elements project online though, so I don't know how to properly attribute
=================================================================
'''
import Box2D as box2d
from .. import lang
class Simulator(object):
    """
    Encapsulates the Box2D simulation and
    provides useful physics related functions
    """
    def __init__(self, dimensions=((0,0),(100,100)), \
                 gravity=(0,0), \
                 velocityIterations=20, \
                 positionIterations=20, \
                 timeStep=1.0/60):
        """Create the Box2D world.

        dimensions : ((minX, minY), (maxX, maxY)) world bounding box
        gravity    : (gx, gy) world gravity vector
        velocityIterations / positionIterations : solver iteration counts
        timeStep   : seconds simulated per step() call
        """
        # set up box2D
        worldAABB = box2d.b2AABB()
        lower, upper = dimensions
        worldAABB.lowerBound.Set(*lower)
        worldAABB.upperBound.Set(*upper)
        self.world = box2d.b2World(worldAABB, gravity, True) #doSleep=True
        self.dimensions = dimensions
        self.gravity = gravity
        self.velocityIterations = velocityIterations
        self.positionIterations = positionIterations
        self.timeStep = timeStep
        # Route contact events through our listener so game code can
        # subscribe per body.
        self.contactListener = ContactListener()
        self.world.SetContactListener(self.contactListener)

    def step(self):
        '''
        Advance the simulation one timestep
        '''
        self.world.Step(self.timeStep, self.velocityIterations, self.positionIterations)

    def addBody(self, pos, parent=None, sleepFlag=True, isBullet=False, linearDamping=0.01, angularDamping=0.01):
        '''
        Add a body to the simulation at pos
        Optionaly set parent in order to get from this body -> the game object it represents
        '''
        bodyDef = box2d.b2BodyDef()
        bodyDef.position.Set(*pos)
        bodyDef.sleepFlag = sleepFlag
        bodyDef.isBullet = isBullet
        bodyDef.linearDamping = linearDamping
        bodyDef.angularDamping = angularDamping
        body = self.world.CreateBody(bodyDef)
        # The 'id' key is how ContactListener identifies this body later.
        body.userData = {'parent':parent, 'id': id(body)}
        return body
class ContactListener(box2d.b2ContactListener):
    """Dispatches Box2D contact events to per-body callbacks.

    Bodies are identified by the 'id' entry Simulator.addBody stores in
    their userData dict.
    """

    CONTACT_TYPES = lang.Enum.new("ADD", "PERSIST", "REMOVE", "RESULT")

    def __init__(self):
        box2d.b2ContactListener.__init__(self)
        # Maps body id -> callback(contact_type, point).
        self.callbacks = {}

    def connect(self, body, callback):
        '''
        Connects a callback to a body
        Callback should be a function that accepts two parameters: CONTACT_TYPES type, TUPLE point
        '''
        # BUG FIX: the original assigned the undefined name `cb`, raising
        # NameError; the parameter is called `callback`.
        self.callbacks[body.GetUserData()['id']] = callback

    def disconnect(self, body):
        '''
        Remove a callback from a body
        '''
        # BUG FIX: the original indexed self.callbacks[...] in the test,
        # raising KeyError for bodies that were never connected.
        body_id = body.GetUserData()['id']
        if body_id in self.callbacks:
            del self.callbacks[body_id]

    def _dispatch(self, contact_type, point):
        # Notify the callback (if any) for each body involved in the
        # contact, shape1's body first to match the original ordering.
        for shape in (point.shape1, point.shape2):
            body_id = shape.GetBody().GetUserData()['id']
            callback = self.callbacks.get(body_id)
            if callback is not None:
                callback(contact_type, point)

    # BUG FIX: the originals referenced the bare name CONTACT_TYPES inside
    # the methods below; class attributes are not in a method's scope, so
    # every contact raised NameError.  They must be reached through self.
    def Add(self, point):
        self._dispatch(self.CONTACT_TYPES.ADD, point)

    def Persist(self, point):
        self._dispatch(self.CONTACT_TYPES.PERSIST, point)

    def Remove(self, point):
        self._dispatch(self.CONTACT_TYPES.REMOVE, point)

    def Result(self, point):
        self._dispatch(self.CONTACT_TYPES.RESULT, point)
|
UTF-8
|
Python
| false | false | 2,013 |
18,047,452,598,595 |
cc811b7736736782ccf37d346ccfe5644756bd02
|
9e02bcd634f54d5b570273465cf646ddd3bc3901
|
/com/modle/using_tupel.py
|
d05e61cb81258a8fe7abd2e5eee9009a5e8d5537
|
[] |
no_license
|
luoluopan/pythonFisrt
|
https://github.com/luoluopan/pythonFisrt
|
cd8aafaa61bbf2cd7218771d4be3b07b2265f5f3
|
289da3c54ebf2b5061ad8dcbcef5afd25e7374a5
|
refs/heads/master
| 2016-09-06T02:06:43.804684 | 2013-10-30T07:27:48 | 2013-10-30T07:27:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'renfei'

# Demonstrates tuple literals, iteration, nesting/indexing, and %-formatting.
zoo = ("daxiang", "tiger", "shizi")
print(zoo)
for animal in zoo:
    print(animal)

# Tuples can contain other tuples; index into the nested one with [2][2].
new_zoo = ("bird", "tuzi", zoo)
print(new_zoo)
print(new_zoo[2][2])

age = 23
name = "daiwei"
print("%s is %d years old" % (name, age))
|
UTF-8
|
Python
| false | false | 2,013 |
16,896,401,359,266 |
804a1ccf2c3668986945569183f01a1f92c6065d
|
cb9083bbf490c76690a8b3dd5426986378215c1b
|
/code/StrategiesTwo.py
|
db912f5937cbbde5395c4945c7431173ddeddd03
|
[] |
no_license
|
Jasonvdm/waarheid
|
https://github.com/Jasonvdm/waarheid
|
b70a08dbcd776ec604041a632126e416c4c2bbde
|
723c9f0bb98df173d3a9447a745fc0cc2ebd06b0
|
refs/heads/master
| 2020-05-24T12:56:52.909184 | 2014-02-28T23:37:07 | 2014-02-28T23:37:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import Util
import string
def applyEndVerbTenseRule(translation):
    """If the sentence ends with a verb, convert it to past tense; when the
    sentence has a second verb the reordering branch below then runs.

    translation : list of (word, part_of_speech) tuples; mutated in place
    (the reordering branch returns a rebuilt list instead).
    """
    if translation[-1][1] == 'V':
        pastTense = Util.readDict("english_tenses")
        if translation[-1][0] in pastTense:
            # NOTE(review): assumes english_tenses maps a word to a complete
            # (word, pos) tuple — confirm; the parentheses here do not make
            # a tuple on their own.
            translation[-1] = (pastTense[translation[-1][0]])
    if Util.countVerbs(translation) > 1 and translation[-1][1] == 'V':
        # NOTE(review): this branch duplicates applySecondVerbRule almost
        # verbatim but starts with secondVerb = len(translation) (one past
        # the end).  Since the last token is a verb, the reversed scan sets
        # insertIndex = len(translation) immediately, so the loop below
        # appears to copy the list unchanged — confirm intent.
        secondVerb = len(translation)
        insertIndex = 0
        startIndex = secondVerb
        # if translation[secondVerb-1][1] in ['N','PN']:
        #     startIndex = secondVerb-1
        for i in reversed(xrange(startIndex)):
            if '\"' in translation[i][0] or ',' in translation[i][0] or ':' in translation[i][0]:
                insertIndex = secondVerb
            if translation[i][1] == 'V' and insertIndex == 0:
                insertIndex = i + 1
        newTranslation = []
        for i in xrange(len(translation)):
            if i == insertIndex:
                newTranslation.append(translation[secondVerb])
            if i == secondVerb: continue
            newTranslation.append(translation[i])
        # Drop a trailing infinitive marker left over from the move.
        if newTranslation[-1][0] == 'to':
            return newTranslation[:-1]
        return newTranslation
    return translation
def applyDoubleNegativeRule(translation):
    """Drop a trailing negation token to avoid a double negative.

    translation : list of (word, part_of_speech) tuples.
    """
    last_word = translation[-1][0]
    if not Util.isNegative(last_word):
        return translation
    return translation[:-1]
def applyQuestionRule(translation):
    """Append '?' to the last word when the sentence opens with a question
    marker (part-of-speech tag 'Q').  Mutates and returns `translation`.
    """
    if translation[0][1] != "Q":
        return translation
    last_word, last_pos = translation[-1]
    translation[-1] = (last_word + "?", last_pos)
    return translation
def applyNounVerbRule(translation):
    """Swap verb/noun pairs so nouns precede verbs (subject-verb order),
    except after 'is' or when another verb immediately follows.

    translation : list of (word, part_of_speech) tuples; mutated in place
    and returned.  Swaps cascade left-to-right, as in the original.

    Fixes vs the original:
    - range() instead of the Python-2-only xrange() (identical iteration).
    - the final-pair pass is guarded, so inputs with fewer than two tokens
      no longer raise IndexError.
    """
    for i in range(1, len(translation) - 1):
        prev_tok = translation[i - 1]
        cur_tok = translation[i]
        following = translation[i + 1]
        if (prev_tok[1] == 'V' and cur_tok[1] in ('N', 'PN')
                and prev_tok[0] != 'is' and following[1] != 'V'):
            translation[i - 1] = cur_tok
            translation[i] = prev_tok
    # Final pair gets the same swap without the "next token" condition.
    if len(translation) >= 2:
        prev_tok = translation[-2]
        last_tok = translation[-1]
        if prev_tok[1] == 'V' and last_tok[1] in ('N', 'PN') and prev_tok[0] != 'is':
            translation[-2] = last_tok
            translation[-1] = prev_tok
    return translation
def applyQuoteTenseRule(translation):
    """Convert verbs to past tense everywhere except inside the quoted span
    (between the first and the last quote-bearing tokens).

    translation : list of (word, part_of_speech) tuples; mutated in place
    and returned.  Returns unchanged when no token contains a quote mark.
    """
    first_quote = -1
    last_quote = -1
    tense_map = Util.readDict("english_tenses")
    for idx, token in enumerate(translation):
        if '"' in token[0]:
            if first_quote == -1:
                first_quote = idx
            else:
                last_quote = idx
    if first_quote == -1:
        return translation
    for idx, (word, pos) in enumerate(translation):
        # Leave the quoted span untouched (empty span if only one quote).
        if first_quote <= idx <= last_quote:
            continue
        if pos == 'V' and word in tense_map:
            translation[idx] = tense_map[word]
    return translation
def applySecondVerbRule(translation):
    """When the sentence has two verbs, move the second verb to sit right
    after the first one, dropping a trailing 'to' left behind by the move.

    translation : list of (word, part_of_speech) tuples; the reordering
    path returns a rebuilt list, otherwise the input is returned as-is.
    """
    if Util.countVerbs(translation) > 1:
        # Locate the first and second verbs.
        firstVerb = -1
        secondVerb = -1
        for i in xrange(len(translation)):
            if translation[i][1] == 'V':
                if firstVerb == -1:
                    firstVerb = i
                else:
                    secondVerb = i
        # Scan right-to-left for the insertion point: just after a verb,
        # unless punctuation forces the verb to stay where it is.
        # NOTE(review): the punctuation branch overwrites insertIndex
        # unconditionally, even after it has been set — confirm intent.
        insertIndex = 0
        startIndex = secondVerb
        # if translation[secondVerb-1][1] in ['N','PN']:
        #     startIndex = secondVerb-1
        for i in reversed(xrange(startIndex)):
            if '\"' in translation[i][0] or ',' in translation[i][0] or ':' in translation[i][0]:
                insertIndex = secondVerb
            if translation[i][1] == 'V' and insertIndex == 0:
                insertIndex = i + 1
        # Rebuild: drop the second verb from its old slot and re-insert it
        # at insertIndex.
        newTranslation = []
        for i in xrange(len(translation)):
            if i == insertIndex:
                newTranslation.append(translation[secondVerb])
            if i == secondVerb: continue
            newTranslation.append(translation[i])
        # Drop a dangling infinitive marker left at the end by the move.
        if newTranslation[-1][0] == 'to':
            return newTranslation[:-1]
        return newTranslation
    return translation
def applyStillContextRule(translation):
    """Insert a copula before a bare 'still' (e.g. "he still here" ->
    "he is still here"); 'are' is used when the preceding word ends in 's'
    (crude plural detection).

    translation: list of (word, part-of-speech) tuples; returns a new list.
    """
    insert_at = -1
    copula = ""
    for i in range(len(translation)):
        # Bounds-check the i+1 lookahead: the original code raised IndexError
        # when 'still' was the final token. A trailing 'still' gets no copula.
        if (translation[i][0] == "still"
                and i + 1 < len(translation)
                and translation[i - 1][0] not in ['is', 'are']
                and translation[i + 1][1] != 'V'):
            insert_at = i
            copula = "is"
            if translation[i - 1][0][-1] == 's':
                copula = "are"
    result = []
    for i in range(len(translation)):
        if i == insert_at:
            result.append((copula, 'V'))
        result.append(translation[i])
    return result
def applyCapitalizationPeriodRule(translation):
    """Capitalize the first word of the sentence and ensure it ends with
    terminal punctuation ('.', '?' or '!').

    When the first word starts with a non-letter (e.g. an opening quote), the
    character after it is capitalized instead. Mutates *translation* in place
    and returns it.
    """
    first_word = translation[0][0]
    if first_word[0] not in string.ascii_letters:
        # Guard the [1] access: the original code raised IndexError on a
        # single-character non-letter first word (e.g. a lone quote mark).
        if len(first_word) > 1:
            first_word = first_word[0] + first_word[1].upper() + first_word[2:]
    else:
        first_word = first_word[0].upper() + first_word[1:]
    translation[0] = (first_word, translation[0][1])
    last_word = translation[-1][0]
    if last_word[-1] not in ['.', '?', '!']:
        last_word += '.'
    translation[-1] = (last_word, translation[-1][1])
    return translation
def applyObjectRule(translation):
    """Move a pronoun object so it follows the sentence's second verb.

    Applies only when the sentence has at least two verbs and contains an
    "N PN" / "PN PN" pair whose trailing pronoun is taken as the object
    (the last such pair wins; pairs inside quoted/punctuated words are
    skipped). Returns a new list, or the original when nothing applies.
    """
    if Util.countVerbs(translation) < 2:
        return translation
    # locate the object pronoun: trailing element of the last N/PN + PN pair
    pronoun = ''
    pronoun_idx = 0
    for idx in range(len(translation) - 1):
        tag = translation[idx][1]
        next_tag = translation[idx + 1][1]
        if (tag == 'N' or tag == 'PN') and next_tag == 'PN':
            token = translation[idx][0]
            if '\"' in token or ',' in token or ':' in token:
                continue
            pronoun_idx = idx + 1
            pronoun = translation[idx + 1][0]
    if pronoun == '':
        return translation
    # find the index of the second verb
    verb_count = 0
    second_verb_idx = 0
    for idx in range(len(translation)):
        if translation[idx][1] == 'V':
            verb_count += 1
            if verb_count == 2:
                second_verb_idx = idx
                break
    # rebuild: drop the pronoun from its slot, re-insert after the second verb
    rebuilt = []
    for idx in range(len(translation)):
        if idx == pronoun_idx:
            continue
        rebuilt.append(translation[idx])
        if idx == second_verb_idx:
            rebuilt.append((pronoun, 'PN'))
    return rebuilt
|
UTF-8
|
Python
| false | false | 2,014 |
12,429,635,398,088 |
8ffa64ca64410369020dc0293a2a5dd1b72cdcb5
|
d011d54145eef58ddcbfcc50f5bbc0caa489acdc
|
/sc_web/api/logic.py
|
3cdde171e30beae36df27d7f633b075f6fd4f354
|
[] |
no_license
|
vasilenko-alexander/sc-web
|
https://github.com/vasilenko-alexander/sc-web
|
fce78ac9a6e2cb3d4b0fbda920f98e8db8f84d51
|
3285cf64e72bce563ac84a2f554128578afd7c87
|
refs/heads/master
| 2021-01-21T07:53:21.967054 | 2014-01-22T22:52:16 | 2014-01-22T22:52:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
-----------------------------------------------------------------------------
This source file is part of OSTIS (Open Semantic Technology for Intelligent Systems)
For the latest info, see http://www.ostis.net
Copyright (c) 2012 OSTIS
OSTIS is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OSTIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with OSTIS. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------------
"""
from keynodes import KeynodeSysIdentifiers, Keynodes
from sctp.types import SctpIteratorType, ScElementType
from django.db.backends.dummy.base import DatabaseError
# Public API of this module. The previous tuple omitted several public
# helpers defined below (find_translation_with_format, get_link_mime,
# get_languages_list, ScSession), hiding them from ``from logic import *``;
# they are exported here as well (backward-compatible: only adds names).
__all__ = (
    'parse_menu_command',
    'find_cmd_result',
    'find_answer',
    'find_translation',
    'find_translation_with_format',
    'check_command_finished',
    'append_to_system_elements',
    'get_link_mime',
    'get_languages_list',
    'ScSession',
)
def parse_menu_command(cmd_addr, sctp_client, keys):
    """Parse specified command from sc-memory and
    return hierarchy map (with childs), that represent it
    @param cmd_addr: sc-addr of command to parse
    @param sctp_client: sctp client object to work with sc-memory
    @param keys: keynodes object. Used just to prevent new instance creation
    @return: dict with 'cmd_type' ('cmd_atom' / 'cmd_noatom' / 'unknown'),
             'id' (string id of cmd_addr) and, when the command has a
             decomposition, 'childs' (recursively parsed sub-commands)
    """
    keynode_ui_user_command_atom = keys[KeynodeSysIdentifiers.ui_user_command_atom]
    keynode_ui_user_command_noatom = keys[KeynodeSysIdentifiers.ui_user_command_noatom]
    keynode_nrel_decomposition = keys[KeynodeSysIdentifiers.nrel_decomposition]
    # try to find command type: membership in the atom/noatom command classes
    cmd_type = 'unknown'
    if sctp_client.iterate_elements(SctpIteratorType.SCTP_ITERATOR_3F_A_F,
                                    keynode_ui_user_command_atom,
                                    ScElementType.sc_type_arc_pos_const_perm,
                                    cmd_addr) is not None:
        cmd_type = 'cmd_atom'
    elif sctp_client.iterate_elements(SctpIteratorType.SCTP_ITERATOR_3F_A_F,
                                      keynode_ui_user_command_noatom,
                                      ScElementType.sc_type_arc_pos_const_perm,
                                      cmd_addr) is not None:
        cmd_type = 'cmd_noatom'
    attrs = {}
    attrs['cmd_type'] = cmd_type
    attrs['id'] = cmd_addr.to_id()
    # try to find decomposition (decomposition_node => cmd_addr via
    # nrel_decomposition)
    decomp = sctp_client.iterate_elements(
        SctpIteratorType.SCTP_ITERATOR_5_A_A_F_A_F,
        ScElementType.sc_type_node | ScElementType.sc_type_const,
        ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
        cmd_addr,
        ScElementType.sc_type_arc_pos_const_perm,
        keynode_nrel_decomposition
    )
    if decomp is not None:
        # iterate child commands of the decomposition node and parse each
        # recursively
        childs = sctp_client.iterate_elements(
            SctpIteratorType.SCTP_ITERATOR_3F_A_A,
            decomp[0][0],
            ScElementType.sc_type_arc_pos_const_perm,
            ScElementType.sc_type_node | ScElementType.sc_type_const
        )
        if childs is not None:
            child_commands = []
            for item in childs:
                child_structure = parse_menu_command(item[2], sctp_client, keys)
                child_commands.append(child_structure)
            attrs["childs"] = child_commands
    return attrs
def find_cmd_result(command_addr, keynode_ui_nrel_command_result, sctp_client):
    """Find sc-links bound to *command_addr* by the command-result relation."""
    args = (
        command_addr,
        ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
        ScElementType.sc_type_link,
        ScElementType.sc_type_arc_pos_const_perm,
        keynode_ui_nrel_command_result,
    )
    return sctp_client.iterate_elements(SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F, *args)
def find_answer(question_addr, keynode_nrel_answer, sctp_client):
    """Find answer nodes bound to *question_addr* by the answer relation."""
    args = (
        question_addr,
        ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
        ScElementType.sc_type_node | ScElementType.sc_type_const,
        ScElementType.sc_type_arc_pos_const_perm,
        keynode_nrel_answer,
    )
    return sctp_client.iterate_elements(SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F, *args)
def find_translation(construction_addr, keynode_nrel_translation, sctp_client):
    """Find translation links bound to *construction_addr* by the translation relation."""
    args = (
        construction_addr,
        ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
        ScElementType.sc_type_link,
        ScElementType.sc_type_arc_pos_const_perm,
        keynode_nrel_translation,
    )
    return sctp_client.iterate_elements(SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F, *args)
def find_translation_with_format(construction_addr, format_addr, keynode_nrel_format, keynode_nrel_translation, sctp_client):
    """Return the first translation link of *construction_addr* that is
    connected to the requested format, or None when none matches."""
    candidates = find_translation(construction_addr, keynode_nrel_translation, sctp_client)
    if candidates is None:
        return None
    for candidate in candidates:
        link_addr = candidate[2]
        # keep only links connected to the requested format
        fmt = sctp_client.iterate_elements(
            SctpIteratorType.SCTP_ITERATOR_3F_A_F,
            link_addr,
            ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
            format_addr
        )
        if fmt is not None:
            return fmt[0][0]
    return None
def check_command_finished(command_addr, keynode_command_finished, sctp_client):
    """Look up whether *command_addr* belongs to the command_finished set
    (non-None result means finished)."""
    args = (
        keynode_command_finished,
        ScElementType.sc_type_arc_pos_const_perm,
        command_addr,
    )
    return sctp_client.iterate_elements(SctpIteratorType.SCTP_ITERATOR_3F_A_F, *args)
def append_to_system_elements(sctp_client, keynode_system_element, el):
    """Mark *el* as a system element by arcing it from keynode_system_element."""
    arc_type = ScElementType.sc_type_arc_pos_const_perm
    sctp_client.create_arc(arc_type, keynode_system_element, el)
def get_link_mime(link_addr, keynode_nrel_format, keynode_nrel_mimetype, sctp_client):
    """Resolve the mimetype string of an sc-link via its format node;
    falls back to 'text/plain' whenever any step fails."""
    default_mime = u'text/plain'
    # determine the format node of the link
    fmt = sctp_client.iterate_elements(
        SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F,
        link_addr,
        ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
        ScElementType.sc_type_node | ScElementType.sc_type_const,
        ScElementType.sc_type_arc_pos_const_perm,
        keynode_nrel_format
    )
    if fmt is None:
        return default_mime
    # determine the mimetype link of that format
    mime = sctp_client.iterate_elements(
        SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F,
        fmt[0][2],
        ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
        ScElementType.sc_type_link,
        ScElementType.sc_type_arc_pos_const_perm,
        keynode_nrel_mimetype
    )
    if mime is None:
        return default_mime
    content = sctp_client.get_link_content(mime[0][2])
    if content is None:
        return default_mime
    return content
def get_languages_list(keynode_languages, sctp_client):
    """Return string ids of all language nodes attached to *keynode_languages*."""
    found = sctp_client.iterate_elements(
        SctpIteratorType.SCTP_ITERATOR_3F_A_A,
        keynode_languages,
        ScElementType.sc_type_arc_pos_const_perm,
        ScElementType.sc_type_node | ScElementType.sc_type_const
    )
    if found is None:
        return []
    return [triple[2].to_id() for triple in found]
# -------------- work with session -------------------------
class ScSession:
    """Maps the current visitor (authenticated django user or anonymous
    browser session) to a "user" node in sc-memory and manages the per-user
    settings stored there (used natural language, default external language).
    """
    def __init__(self, user, session, sctp_client, keynodes):
        """Initialize session class with request.user object
        @param user: django request.user
        @param session: django session object
        @param sctp_client: client used for all sc-memory operations
        @param keynodes: keynode resolver (shared to avoid re-resolution)
        """
        self.user = user
        self.session = session
        self.sctp_client = sctp_client
        self.keynodes = keynodes
        # lazily resolved sc-addr of the user node (see get_sc_addr)
        self.sc_addr = None
    def get_sc_addr(self):
        """Resolve sc-addr of session (lazily; cached in self.sc_addr)
        """
        if not self.sc_addr:
            if self.user.is_authenticated():
                # authenticated: persistent node keyed by username
                self.sc_addr = self._user_get_sc_addr()
                if not self.sc_addr:
                    self.sc_addr = self._user_new()
            else:
                # anonymous: node keyed by the django session key; the key
                # only exists once the session has been saved at least once
                if not self.session.session_key:
                    self.session.save()
                self.sc_addr = self._session_get_sc_addr()
                if not self.sc_addr:
                    self.sc_addr = self._session_new_sc_addr()
        # check sc-addr
        #if not self.sctp_client.check_element(self.sc_addr):
        # todo check user addr
        return self.sc_addr
    def get_used_language(self):
        """Returns sc-addr of currently used natural language
        """
        results = self.sctp_client.iterate_elements(
            SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F,
            self.get_sc_addr(),
            ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
            ScElementType.sc_type_node | ScElementType.sc_type_const,
            ScElementType.sc_type_arc_pos_const_perm,
            self.keynodes[KeynodeSysIdentifiers.ui_nrel_user_used_language]
        )
        if results:
            return results[0][2]
        # setup russian mode by default
        _lang = self.keynodes[KeynodeSysIdentifiers.lang_ru]
        self.set_current_lang_mode(_lang)
        return _lang
    def get_default_ext_lang(self):
        """Returns sc-addr of default external language
        """
        results = self.sctp_client.iterate_elements(
            SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F,
            self.get_sc_addr(),
            ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
            ScElementType.sc_type_node | ScElementType.sc_type_const,
            ScElementType.sc_type_arc_pos_const_perm,
            self.keynodes[KeynodeSysIdentifiers.ui_nrel_user_default_ext_language]
        )
        if results:
            return results[0][2]
        # setup scn mode by default
        _lang = self.keynodes[KeynodeSysIdentifiers.scs_code]
        self.set_default_ext_lang(_lang)
        return _lang
    def set_current_lang_mode(self, mode_addr):
        """Setup new language mode as current for this session
        """
        # try to find currently used mode and remove it
        results = self.sctp_client.iterate_elements(
            SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F,
            self.get_sc_addr(),
            ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
            ScElementType.sc_type_node | ScElementType.sc_type_const,
            ScElementType.sc_type_arc_pos_const_perm,
            self.keynodes[KeynodeSysIdentifiers.ui_nrel_user_used_language]
        )
        if results:
            self.sctp_client.erase_element(results[0][1])
        # connect user -> language and mark the arc with the relation keynode
        arc = self.sctp_client.create_arc(ScElementType.sc_type_arc_common | ScElementType.sc_type_const, self.get_sc_addr(), mode_addr)
        self.sctp_client.create_arc(ScElementType.sc_type_arc_pos_const_perm, self.keynodes[KeynodeSysIdentifiers.ui_nrel_user_used_language], arc)
    def set_default_ext_lang(self, lang_addr):
        """Setup new default external language
        """
        # try to find default external language and remove it
        results = self.sctp_client.iterate_elements(
            SctpIteratorType.SCTP_ITERATOR_5F_A_A_A_F,
            self.get_sc_addr(),
            ScElementType.sc_type_arc_common | ScElementType.sc_type_const,
            ScElementType.sc_type_node | ScElementType.sc_type_const,
            ScElementType.sc_type_arc_pos_const_perm,
            self.keynodes[KeynodeSysIdentifiers.ui_nrel_user_default_ext_language]
        )
        if results:
            self.sctp_client.erase_element(results[0][1])
        # connect user -> language and mark the arc with the relation keynode
        arc = self.sctp_client.create_arc(ScElementType.sc_type_arc_common | ScElementType.sc_type_const, self.get_sc_addr(), lang_addr)
        self.sctp_client.create_arc(ScElementType.sc_type_arc_pos_const_perm, self.keynodes[KeynodeSysIdentifiers.ui_nrel_user_default_ext_language], arc)
    def _find_user_by_system_idtf(self, idtf):
        """Look up a node by its system identifier (utf-8 encoded)."""
        value = self.sctp_client.find_element_by_system_identifier(str(idtf.encode('utf-8')))
        return value
    def _create_user_with_system_idtf(self, idtf):
        """Create a new ui_user node and assign it the given system identifier."""
        keynode_ui_user = self.keynodes[KeynodeSysIdentifiers.ui_user]
        # create user node
        user_node = self.sctp_client.create_node(ScElementType.sc_type_node | ScElementType.sc_type_const)
        self.sctp_client.create_arc(ScElementType.sc_type_arc_pos_const_perm, keynode_ui_user, user_node)
        res = self.sctp_client.set_system_identifier(user_node, str(idtf.encode('utf-8')))
        return user_node
    def _session_new_sc_addr(self):
        """Create the sc node for an anonymous session."""
        return self._create_user_with_system_idtf("session::" + str(self.session.session_key))
    def _session_get_sc_addr(self):
        """Find the sc node for an anonymous session, if it exists."""
        return self._find_user_by_system_idtf("session::" + str(self.session.session_key))
    def _user_new(self):
        """Create the sc node for an authenticated user."""
        return self._create_user_with_system_idtf("user::" + str(self.user.username))
    def _user_get_sc_addr(self):
        """Find the sc node for an authenticated user, if it exists."""
        return self._find_user_by_system_idtf("user::" + str(self.user.username))
|
UTF-8
|
Python
| false | false | 2,014 |
14,224,931,687,409 |
cdfa225abe83323582f708ca614a85221a59e95d
|
6743c228b8f39ee5d8622dcec732090bf6c97814
|
/plugin.audio.radiokolekcja.pl/default.py
|
f777151d895dfe55e9352883f7e27ea6987855e3
|
[
"GPL-2.0-only"
] |
non_permissive
|
isabella232/smuto
|
https://github.com/isabella232/smuto
|
60fa6f7134d13830fafc5278c4dcf5ebd3d1983f
|
31f9a1106361cf118e8ebbb7d0e19b3342cc7a50
|
refs/heads/master
| 2022-02-15T13:04:05.476300 | 2014-05-03T10:51:31 | 2014-05-03T10:51:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import sys
import urlparse
import xbmcaddon
import xbmcgui
import xbmcplugin
import xbmcvfs
import channels
def showChannels():
    """Build the XBMC directory listing with one playable item per channel."""
    for ch in channels.CHANNELS:
        logo = os.path.join(LOGO_PATH, str(ch.id) + '.png')
        # fall back to the add-on icon when no per-channel logo is bundled
        icon = logo if xbmcvfs.exists(logo) else ICON
        item = xbmcgui.ListItem(ch.name, iconImage = icon)
        item.setProperty('IsPlayable', 'true')
        item.setProperty('Fanart_Image', FANART)
        item.setInfo(type = 'music', infoLabels = {
            'title' : ch.name
        })
        xbmcplugin.addDirectoryItem(HANDLE, PATH + '?play=%d' % ch.id, item)
    xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_LABEL)
    xbmcplugin.endOfDirectory(HANDLE)
def play(idx):
    """Resolve the stream URL of channel *idx* and hand it to the XBMC player."""
    matches = [c for c in channels.CHANNELS if c.id == int(idx)]
    if not matches:
        return
    channel = matches[0]
    logo = os.path.join(LOGO_PATH, str(channel.id) + '.png')
    item = xbmcgui.ListItem(path = channel.url, thumbnailImage = logo)
    item.setInfo(type = 'music', infoLabels = {
        'title' : channel.name
    })
    xbmcplugin.setResolvedUrl(HANDLE, True, item)
if __name__ == '__main__':
    # Plugin entry point: XBMC invokes the script with
    # argv = [plugin url, handle, query string].
    ADDON = xbmcaddon.Addon()
    PATH = sys.argv[0]
    HANDLE = int(sys.argv[1])
    PARAMS = urlparse.parse_qs(sys.argv[2][1:])
    LOGO_PATH = os.path.join(ADDON.getAddonInfo('path'), 'resources', 'logos')
    ICON = os.path.join(ADDON.getAddonInfo('path'), 'icon.png')
    FANART = os.path.join(ADDON.getAddonInfo('path'), 'fanart.jpg')
    # 'play' in the query string selects playback; otherwise list channels
    if 'play' in PARAMS:
        play(PARAMS['play'][0])
    else:
        showChannels()
|
UTF-8
|
Python
| false | false | 2,014 |
7,198,365,199,922 |
090f41637a32bb43b08f216e83e85afcd650585f
|
a86df5a524853d64962694a1486191d7317b2af5
|
/Server/modules/model/comms.py
|
c80a2ccebd226c19b456ebfee0ad89323f5bcd22
|
[] |
no_license
|
CarlAmbroselli/AuctionLoft
|
https://github.com/CarlAmbroselli/AuctionLoft
|
275efa05e4f7f254db7c0ae5a4e8a69e9dc7cde6
|
996d9dcbdb8293780900be22cdf9f85e8f1ff044
|
refs/heads/master
| 2021-01-09T09:35:00.611954 | 2014-05-19T20:41:20 | 2014-05-19T20:41:20 | 18,968,931 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python2.7
# Licensed under MIT License
from protorpc import messages
import datetime
from google.appengine.ext import ndb
from google.appengine.api import users
class Comm(messages.Message):
    """ProtoRPC wire message describing a communication between two users
    about an auction item."""
    # NOTE(review): timestamp/price/ids are all carried as strings --
    # presumably for client simplicity; confirm before changing field types.
    comm_id = messages.StringField(1, required=True)
    subject = messages.StringField(2, )#, required=True)
    sender = messages.StringField(3) #uid of Sender
    receiver = messages.StringField(4) #uid of receiver
    timestamp = messages.StringField(5)
    content = messages.StringField(6)
    item_id = messages.StringField(7)
    item_title = messages.StringField(8)
    price = messages.StringField(9)
class CommDB(ndb.Model):
    """Datastore (NDB) model persisting a communication; field-for-field
    mirror of the Comm ProtoRPC message."""
    comm_id = ndb.StringProperty(required=True)
    subject = ndb.StringProperty() #, required=True)
    sender = ndb.StringProperty() #uid of Sender
    receiver = ndb.StringProperty() #uid of receiver
    timestamp = ndb.StringProperty()
    content = ndb.StringProperty()
    item_id = ndb.StringProperty()
    item_title = ndb.StringProperty()
    price = ndb.StringProperty()
|
UTF-8
|
Python
| false | false | 2,014 |
7,799,660,646,268 |
af996a05123c63f0b380add18841137dc33bd3e0
|
d2455f590c8a28efdbde8e0e525d6103cfbd89cc
|
/dndtools/dnd/views.py
|
1e19e166d9b5f55dd49955c55bbab4ec83877e56
|
[
"MIT"
] |
permissive
|
Aaron-M-Hill/dndtools
|
https://github.com/Aaron-M-Hill/dndtools
|
11a7fc0f65fce166193aaea56889db5d9138e21d
|
9a851be432f6403499e1e95ba50959827e3ebff8
|
refs/heads/master
| 2021-01-19T09:38:01.016230 | 2013-02-27T21:35:34 | 2013-02-27T21:35:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Create your views here.
from math import ceil
from django.contrib.auth.models import User
from django.core.mail.message import EmailMessage
from django.core.urlresolvers import reverse
from django.http import HttpResponsePermanentRedirect, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template.context import RequestContext
from reversion.revisions import revision
from dndtools.dnd.dnd_paginator import DndPaginator
from dndtools.dnd.filters import SpellFilter, CharacterClassFilter, RulebookFilter, FeatFilter, SpellDomainFilter, SpellDescriptorFilter, SkillFilter, RaceFilter, MonsterFilter, ItemFilter
from dndtools.dnd.forms import ContactForm, InaccurateContentForm
from dndtools.dnd.models import (Rulebook, DndEdition, FeatCategory, Feat,
SpellSchool, SpellDescriptor, SpellSubSchool,
Spell, CharacterClass, Domain, CharacterClassVariant, Skill, Race, SkillVariant, NewsEntry, StaticPage, Monster, Rule, Item)
from dndtools.dnd.utilities import int_with_commas
def permanent_redirect_view(request, view_name, args=None, kwargs=None):
    """Issue a 301 redirect to *view_name*, preserving the query string."""
    target = reverse(view_name, args=args, kwargs=kwargs)
    if len(request.GET) > 0:
        #noinspection PyUnresolvedReferences
        target = target + "?" + request.GET.urlencode()
    return HttpResponsePermanentRedirect(target)
def permanent_redirect_object(request, object):
    """301-redirect to an object's canonical URL, preserving the query string.

    NOTE(review): the parameter name shadows the builtin ``object``; kept
    as-is for backward compatibility with keyword callers.
    """
    target = object.get_absolute_url()
    if len(request.GET) > 0:
        #noinspection PyUnresolvedReferences
        target = target + "?" + request.GET.urlencode()
    return HttpResponsePermanentRedirect(target)
def is_3e_edition(edition):
    """True when *edition* uses the 'DnD 3.0' rules system (used to show the
    3.0-compatibility warning in templates)."""
    return 'DnD 3.0' == edition.system
def index(request):
    """Render the front page with the latest news; remember the newest entry
    id in a long-lived cookie."""
    news = NewsEntry.objects.filter(enabled=True).order_by('-published')[:15]
    ctx = {'request': request, 'newsEntries': news, }
    response = render_to_response('dnd/index.html', ctx,
                                  context_instance=RequestContext(request), )
    if len(news):
        response.set_cookie('top_news', news[0].pk, 10 * 365 * 24 * 60 * 60)
    return response
def rulebook_list(request):
    """Paginated, name-filterable list of all rulebooks."""
    flt = RulebookFilter(request.GET, queryset=Rulebook.objects.select_related(
        'dnd_edition'))
    submitted = 1 if 'name' in request.GET else 0
    pager = DndPaginator(flt.qs, request)
    ctx = {
        'request': request,
        'rulebook_list': pager.items(),
        'paginator': pager,
        'filter': flt,
        'form_submitted': submitted,
    }
    return render_to_response('dnd/rulebook_list.html', ctx,
                              context_instance=RequestContext(request), )
def edition_list(request):
    """Paginated list of all D&D editions."""
    pager = DndPaginator(DndEdition.objects.all(), request)
    ctx = {
        'edition_list': pager.items(),
        'paginator': pager,
        'request': request,
    }
    return render_to_response('dnd/edition_list.html', ctx,
                              context_instance=RequestContext(request), )
def edition_detail(request, edition_slug, edition_id):
    """Show one edition with its rulebooks; 301-redirect on a stale slug."""
    dnd_edition = get_object_or_404(DndEdition, id=edition_id)
    if dnd_edition.slug != edition_slug:
        return permanent_redirect_view(request, 'edition_detail',
                                       kwargs={'edition_slug': dnd_edition.slug,
                                               'edition_id': dnd_edition.id, })
    pager = DndPaginator(
        dnd_edition.rulebook_set.select_related('dnd_edition').all(), request)
    ctx = {
        'dnd_edition': dnd_edition,
        'request': request,
        'rulebook_list': pager.items(),
        'paginator': pager,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(),
        'display_3e_warning': is_3e_edition(dnd_edition),
    }
    return render_to_response('dnd/edition_detail.html', ctx,
                              context_instance=RequestContext(request), )
def rulebook_detail(request, edition_slug, edition_id, rulebook_slug,
                    rulebook_id):
    """Show one rulebook; 301-redirect when any slug/id in the URL is stale."""
    rulebook = get_object_or_404(Rulebook, id=rulebook_id)
    canonical = (rulebook.slug == rulebook_slug and
                 unicode(rulebook.dnd_edition.id) == edition_id and
                 rulebook.dnd_edition.slug == edition_slug)
    if not canonical:
        return permanent_redirect_view(
            request, 'rulebook_detail',
            kwargs={
                'edition_slug': rulebook.dnd_edition.slug,
                'edition_id': rulebook.dnd_edition.id,
                'rulebook_slug': rulebook.slug,
                'rulebook_id': rulebook.id, })
    ctx = {
        'rulebook': rulebook,
        'dnd_edition': rulebook.dnd_edition,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(),
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/rulebook_detail.html', ctx,
                              context_instance=RequestContext(request), )
def feat_index(request):
    """Paginated, filterable index of all feats."""
    flt = FeatFilter(request.GET, queryset=Feat.objects.select_related(
        'rulebook', 'rulebook__dnd_edition').distinct())
    submitted = 1 if 'name' in request.GET else 0
    pager = DndPaginator(flt.qs, request)
    ctx = {
        'request': request,
        'feat_list': pager.items(),
        'paginator': pager,
        'filter': flt,
        'form_submitted': submitted,
    }
    return render_to_response('dnd/feat_index.html', ctx,
                              context_instance=RequestContext(request), )
def feat_list_by_rulebook(request):
    """List rulebooks as entry points for browsing feats per book."""
    # NOTE(review): select_related('rulebook') on a Rulebook queryset looks
    # like a copy-paste leftover (Rulebook has no 'rulebook' relation);
    # kept as-is -- confirm against the model before removing.
    books = Rulebook.objects.select_related('rulebook',
                                            'dnd_edition').all()
    pager = DndPaginator(books, request)
    ctx = {
        'rulebook_list': pager.items(),
        'paginator': pager,
        'request': request, }
    return render_to_response('dnd/feat_list_by_rulebook.html', ctx,
                              context_instance=RequestContext(request), )
def feat_category_list(request):
    """Paginated list of all feat categories."""
    pager = DndPaginator(FeatCategory.objects.all(), request)
    ctx = {
        'feat_category_list': pager.items(),
        'paginator': pager,
        'request': request, }
    return render_to_response('dnd/feat_category_list.html', ctx,
                              context_instance=RequestContext(request), )
def feat_category_detail(request, category_slug):
    """Show one feat category and the feats it contains."""
    category = get_object_or_404(FeatCategory, slug=category_slug)
    feats = category.feat_set.select_related('rulebook',
                                             'rulebook__dnd_edition').all()
    pager = DndPaginator(feats, request)
    ctx = {
        'feat_category': category,
        'feat_list': pager.items(),
        'paginator': pager,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(), }
    return render_to_response('dnd/feat_category_detail.html', ctx,
                              context_instance=RequestContext(request), )
def feats_in_rulebook(request, rulebook_slug, rulebook_id):
    """List feats from one rulebook; 301-redirect on a stale slug."""
    rulebook = get_object_or_404(Rulebook.objects.select_related('dnd_edition'),
                                 pk=rulebook_id)
    if rulebook.slug != rulebook_slug:
        return permanent_redirect_view(request, 'feats_in_rulebook',
                                       kwargs={'rulebook_slug': rulebook.slug,
                                               'rulebook_id': rulebook_id, })
    pager = DndPaginator(
        rulebook.feat_set.select_related('rulebook',
                                         'rulebook__dnd_edition').all(),
        request)
    ctx = {
        'rulebook': rulebook,
        'feat_list': pager.items(),
        'paginator': pager,
        'request': request,
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/feats_in_rulebook.html', ctx,
                              context_instance=RequestContext(request), )
def feat_detail(request, rulebook_slug, rulebook_id, feat_slug, feat_id):
    """Show a single feat with its categories, prerequisites, dependants and
    the same feat as printed in other rulebooks; 301-redirects to the
    canonical URL when the slugs/ids in the URL are stale (feat id is
    authoritative)."""
    feat = get_object_or_404(
        Feat.objects.select_related('rulebook', 'rulebook__dnd_edition'),
        pk=feat_id)
    # canonicalize URL: slugs/edition id are cosmetic, redirect if stale
    if (feat.slug != feat_slug or
        unicode(feat.rulebook.id) != rulebook_id or
        feat.rulebook.slug != rulebook_slug):
        return permanent_redirect_view(request, 'feat_detail',
                                       kwargs={
                                           'rulebook_slug': feat.rulebook.slug,
                                           'rulebook_id': feat.rulebook.id,
                                           'feat_slug': feat.slug,
                                           'feat_id': feat.id, })
    # prerequisite / dependency relations shown on the detail page
    feat_category_list = feat.feat_categories.select_related().all()
    required_feats = feat.required_feats.select_related('required_feat',
                                                        'required_feat__rulebook').all()
    required_by_feats = feat.required_by_feats.select_related('source_feat',
                                                              'source_feat__rulebook').all()
    required_skills = feat.required_skills.select_related('skill').all()
    special_prerequisities = feat.featspecialfeatprerequisite_set.select_related(
        'special_feat_prerequisite').all()
    # related feats: same slug printed in other rulebooks
    related_feats = Feat.objects.filter(slug=feat.slug).exclude(rulebook__id=feat.rulebook.id).select_related('rulebook', 'rulebook__dnd_edition').all()
    return render_to_response('dnd/feat_detail.html',
                              {'feat': feat,
                               'rulebook': feat.rulebook,
                               'feat_category_list': feat_category_list,
                               'required_feats': required_feats,
                               'required_by_feats': required_by_feats,
                               'required_skills': required_skills,
                               'special_prerequisities': special_prerequisities,
                               'request': request,
                               'i_like_it_url': request.build_absolute_uri(),
                               'inaccurate_url': request.build_absolute_uri(),
                               'display_3e_warning': is_3e_edition(feat.rulebook.dnd_edition),
                               'related_feats': related_feats,
                              }, context_instance=RequestContext(request),
    )
def spell_index(request):
    """Paginated, filterable index of all spells."""
    flt = SpellFilter(request.GET, queryset=Spell.objects.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school').distinct())
    pager = DndPaginator(flt.qs, request)
    submitted = 1 if 'name' in request.GET else 0
    ctx = {
        'request': request,
        'spell_list': pager.items(),
        'paginator': pager,
        'filter': flt,
        'form_submitted': submitted,
    }
    return render_to_response('dnd/spell_index.html', ctx,
                              context_instance=RequestContext(request), )
def spell_list_by_rulebook(request):
    """List rulebooks as entry points for browsing spells per book."""
    pager = DndPaginator(Rulebook.objects.select_related('dnd_edition').all(),
                         request)
    ctx = {
        'request': request,
        'rulebook_list': pager.items(),
        'paginator': pager,
    }
    return render_to_response('dnd/spell_list_by_rulebook.html', ctx,
                              context_instance=RequestContext(request), )
def spell_descriptor_list(request):
    """Paginated, filterable list of spell descriptors."""
    flt = SpellDescriptorFilter(request.GET,
                                queryset=SpellDescriptor.objects.all())
    pager = DndPaginator(flt.qs, request)
    submitted = 1 if 'name' in request.GET else 0
    ctx = {
        'request': request,
        'spell_descriptor_list': pager.items(),
        'paginator': pager,
        'filter': flt,
        'form_submitted': submitted,
    }
    return render_to_response('dnd/spell_descriptor_list.html', ctx,
                              context_instance=RequestContext(request), )
def spell_school_list(request):
    """List all spell schools and sub-schools (no pagination)."""
    ctx = {
        'spell_school_list': SpellSchool.objects.all(),
        'spell_sub_school_list': SpellSubSchool.objects.all(),
        'request': request, }
    return render_to_response('dnd/spell_school_list.html', ctx,
                              context_instance=RequestContext(request), )
def spells_in_rulebook(request, rulebook_slug, rulebook_id):
    """List spells from one rulebook; 301-redirect on a stale slug."""
    rulebook = get_object_or_404(Rulebook, pk=rulebook_id)
    if rulebook.slug != rulebook_slug:
        return permanent_redirect_view(request, 'spells_in_rulebook',
                                       kwargs={'rulebook_slug': rulebook.slug,
                                               'rulebook_id': rulebook_id, })
    pager = DndPaginator(
        rulebook.spell_set.select_related(
            'rulebook', 'rulebook__dnd_edition', 'school').all(),
        request)
    ctx = {
        'rulebook': rulebook,
        'spell_list': pager.items(),
        'paginator': pager,
        'request': request,
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/spells_in_rulebook.html', ctx,
                              context_instance=RequestContext(request), )
def spell_detail(request, rulebook_slug, rulebook_id, spell_slug, spell_id):
    """Show a single spell with its class/domain levels and the same spell as
    printed in other rulebooks; 301-redirects to the canonical URL when the
    slugs/ids in the URL are stale (spell id is authoritative)."""
    spell = get_object_or_404(Spell.objects.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school', 'sub_school',
        'class_levels'
    ), pk=spell_id)
    # canonicalize URL: slugs/rulebook id are cosmetic, redirect if stale
    if (spell.slug != spell_slug or
        unicode(spell.rulebook.id) != rulebook_id or
        spell.rulebook.slug != rulebook_slug):
        return permanent_redirect_view(
            request, 'spell_detail', kwargs={
                'rulebook_slug': spell.rulebook.slug,
                'rulebook_id': spell.rulebook.id,
                'spell_slug': spell.slug,
                'spell_id': spell_id,
            }
        )
    # per-class and per-domain spell levels shown in the detail tables
    spell_class_level_set = spell.spellclasslevel_set.select_related(
        'rulebook', 'character_class',
    ).all()
    spell_domain_level_set = spell.spelldomainlevel_set.select_related(
        'rulebook', 'domain',
    ).all()
    # related spells: same slug printed in other rulebooks
    related_spells = Spell.objects.filter(slug=spell.slug).exclude(rulebook__id=spell.rulebook.id).select_related('rulebook').all()
    # corrupt component -- will be linked to corrupt rule
    if spell.corrupt_component:
        corrupt_rule = Rule.objects.filter(slug='corrupt-magic').all()[0]
    else:
        corrupt_rule = None
    return render_to_response('dnd/spell_detail.html',
                              {'spell': spell,
                               'spellclasslevel_set': spell_class_level_set,
                               'spelldomainlevel_set': spell_domain_level_set,
                               'corrupt_rule': corrupt_rule,
                               'rulebook': spell.rulebook,
                               'request': request,
                               'i_like_it_url': request.build_absolute_uri(),
                               'inaccurate_url': request.build_absolute_uri(),
                               'display_3e_warning': is_3e_edition(spell.rulebook.dnd_edition),
                               'related_spells': related_spells,
                              },
                              context_instance=RequestContext(request),
    )
def spell_descriptor_detail(request, spell_descriptor_slug):
    """Show one spell descriptor and all spells carrying it."""
    descriptor = get_object_or_404(SpellDescriptor,
                                   slug=spell_descriptor_slug)
    pager = DndPaginator(
        descriptor.spell_set.select_related(
            'rulebook', 'rulebook__dnd_edition', 'school').all(),
        request)
    ctx = {
        'spell_descriptor': descriptor,
        'spell_list': pager.items(),
        'paginator': pager,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(), }
    return render_to_response('dnd/spell_descriptor_detail.html', ctx,
                              context_instance=RequestContext(request), )
def spell_school_detail(request, spell_school_slug):
    """Show one spell school and all spells belonging to it."""
    school = get_object_or_404(SpellSchool, slug=spell_school_slug)
    pager = DndPaginator(
        school.spell_set.select_related(
            'rulebook', 'rulebook__dnd_edition', 'school').all(),
        request)
    ctx = {
        'spell_school': school,
        'spell_list': pager.items(),
        'paginator': pager,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(), }
    return render_to_response('dnd/spell_school_detail.html', ctx,
                              context_instance=RequestContext(request), )
def spell_sub_school_detail(request, spell_sub_school_slug):
    """Show one spell sub-school and all spells belonging to it."""
    sub_school = get_object_or_404(SpellSubSchool,
                                   slug=spell_sub_school_slug)
    pager = DndPaginator(
        sub_school.spell_set.select_related(
            'rulebook', 'rulebook__dnd_edition', 'school').all(),
        request)
    ctx = {
        'spell_sub_school': sub_school,
        'spell_list': pager.items(),
        'paginator': pager,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(), }
    return render_to_response('dnd/spell_sub_school_detail.html', ctx,
                              context_instance=RequestContext(request), )
def spell_domain_list(request):
    """Filterable, paginated index of all spell domains."""
    domain_filter = SpellDomainFilter(request.GET, queryset=Domain.objects.all())
    paginator = DndPaginator(domain_filter.qs, request)
    # Presence of the 'name' GET parameter marks the filter form as used.
    form_submitted = 1 if 'name' in request.GET else 0
    context = {
        'request': request,
        'spell_domain_list': paginator.items(),
        'paginator': paginator,
        'filter': domain_filter,
        'form_submitted': form_submitted,
    }
    return render_to_response('dnd/spell_domain_list.html', context,
                              context_instance=RequestContext(request))
def spell_domain_detail(request, spell_domain_slug):
    """Show one spell domain with a paginated list of its spells."""
    domain = get_object_or_404(Domain, slug=spell_domain_slug)
    spells = domain.spell_set.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school').all()
    paginator = DndPaginator(spells, request)
    context = {
        'spell_domain': domain,
        'spell_list': paginator.items(),
        'paginator': paginator,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(),
    }
    return render_to_response('dnd/spell_domain_detail.html', context,
                              context_instance=RequestContext(request))
def character_class_list(request):
    """Filterable, paginated index of all character classes."""
    class_filter = CharacterClassFilter(
        request.GET,
        queryset=CharacterClass.objects.select_related(
            'rulebook', 'rulebook__dnd_edition')
    )
    # Presence of the 'name' GET parameter marks the filter form as used.
    form_submitted = 1 if 'name' in request.GET else 0
    paginator = DndPaginator(class_filter.qs, request)
    context = {
        'character_class_list': paginator.items(),
        'paginator': paginator,
        'request': request,
        'filter': class_filter,
        'form_submitted': form_submitted,
    }
    return render_to_response('dnd/character_class_list.html', context,
                              context_instance=RequestContext(request))
def character_class_detail(request, character_class_slug, rulebook_slug=None,
                           rulebook_id=None):
    """Show a character class, selecting which rulebook variant to display.

    Without a rulebook in the URL the "primary" variant (core edition first,
    then most recently published) is shown and the URL is canonical.  With a
    rulebook, that rulebook's variant is shown; if it turns out to be the
    primary one, the view redirects back to the canonical URL instead.
    """
    # fetch the class
    character_class = get_object_or_404(CharacterClass.objects.select_related(
        'character_class_variant', 'character_class_variant__rulebook'),
        slug=character_class_slug)
    assert isinstance(character_class, CharacterClass)
    # fetch primary variant, this is independent of rulebook selected
    try:
        primary_variant = CharacterClassVariant.objects.select_related(
            'rulebook', 'rulebook__dnd_edition',
        ).filter(
            character_class=character_class,
        ).order_by('-rulebook__dnd_edition__core', '-rulebook__published')[0]
    except Exception:
        # [0] raised IndexError: the class has no variants at all
        primary_variant = None
    # if rulebook is supplied, select find this variant
    if rulebook_slug and rulebook_id:
        # use canonical link in head as this is more or less duplicated content
        use_canonical_link = True
        selected_variant = get_object_or_404(
            CharacterClassVariant.objects.select_related(
                'rulebook', 'character_class', 'rulebook__dnd_edition'),
            character_class__slug=character_class_slug,
            rulebook__pk=rulebook_id)
        # possible malformed/changed slug
        if rulebook_slug != selected_variant.rulebook.slug:
            return permanent_redirect_object(request, selected_variant)
        # selected variant is primary! Redirect to canonical url
        if selected_variant == primary_variant:
            return permanent_redirect_view(
                request, character_class_detail, kwargs={
                    'character_class_slug': character_class_slug}
            )
    else:
        # this is canonical, no need to specify it
        use_canonical_link = False
        selected_variant = primary_variant
    # every other printing of this class, offered as alternative links
    other_variants = [
        variant
        for variant
        in character_class .characterclassvariant_set.select_related(
            'rulebook', 'rulebook__dnd_edition', 'character_class') .all()
        if variant != selected_variant
    ]
    if selected_variant:
        required_races = selected_variant.required_races.select_related('race', 'race__rulebook').all()
        required_skills = selected_variant.required_skills.select_related('skill').all()
        required_feats = selected_variant.required_feats.select_related('feat', 'feat__rulebook').all()
        display_3e_warning = is_3e_edition(selected_variant.rulebook.dnd_edition)
    else:
        # no variant known: render the page with empty prerequisite lists
        required_races = ()
        required_skills = ()
        required_feats = ()
        display_3e_warning = False
    return render_to_response('dnd/character_class_detail.html',
                              {'character_class': character_class,
                               'request': request,
                               'i_like_it_url': request.build_absolute_uri(),
                               'inaccurate_url': request.build_absolute_uri(),
                               'selected_variant': selected_variant,
                               'required_races': required_races,
                               'required_skills': required_skills,
                               'required_feats': required_feats,
                               'other_variants': other_variants,
                               'use_canonical_link': use_canonical_link,
                               'display_3e_warning': display_3e_warning,
                               }, context_instance=RequestContext(request),
                              )
def character_class_spells(request, character_class_slug, level):
    """Paginated list of spells a class gains at the given spell level."""
    character_class = get_object_or_404(CharacterClass,
                                        slug=character_class_slug)
    spells = Spell.objects.filter(
        spellclasslevel__character_class=character_class.id,
        spellclasslevel__level=level,
    ).select_related('rulebook', 'rulebook__dnd_edition', 'school')
    paginator = DndPaginator(spells, request)
    context = {
        'character_class': character_class,
        'spell_list': paginator.items(),
        'paginator': paginator,
        'level': level,
        'request': request,
    }
    return render_to_response('dnd/character_class_spells.html', context,
                              context_instance=RequestContext(request))
def character_classes_in_rulebook(request, rulebook_slug, rulebook_id):
    """List every character class printed in one rulebook."""
    rulebook = get_object_or_404(Rulebook, pk=rulebook_id)
    # stale/misspelled slug in the URL: redirect to the canonical one
    if rulebook.slug != rulebook_slug:
        return permanent_redirect_view(request, 'character_classes_in_rulebook',
                                       kwargs={'rulebook_slug': rulebook.slug,
                                               'rulebook_id': rulebook_id, })
    class_list = [
        variant.character_class
        for variant in rulebook.characterclassvariant_set.select_related(
            'character_class').all()
    ]
    context = {
        'rulebook': rulebook,
        'class_list': class_list,
        'request': request,
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/character_classes_in_rulebook.html', context,
                              context_instance=RequestContext(request))
def skill_list(request):
    """Filterable, paginated index of all skills."""
    skill_filter = SkillFilter(request.GET, queryset=Skill.objects.all())
    # Presence of the 'name' GET parameter marks the filter form as used.
    form_submitted = 1 if 'name' in request.GET else 0
    paginator = DndPaginator(skill_filter.qs, request)
    context = {
        'request': request,
        'skill_list': paginator.items(),
        'paginator': paginator,
        'filter': skill_filter,
        'form_submitted': form_submitted,
    }
    return render_to_response('dnd/skill_list.html', context,
                              context_instance=RequestContext(request))
def skill_detail(request, skill_slug, rulebook_slug=None,
                 rulebook_id=None):
    """Show a skill, selecting which rulebook variant to display.

    Mirrors character_class_detail: without a rulebook the "primary" variant
    (core edition first, most recent printing) is canonical; with a rulebook
    that variant is shown, redirecting back to the canonical URL when it is
    the primary one anyway.
    """
    # fetch the class
    skill = get_object_or_404(Skill.objects.select_related(
        'skill_variant', 'skill_variant__rulebook'),
        slug=skill_slug)
    # fetch primary variant, this is independent of rulebook selected
    try:
        primary_variant = SkillVariant.objects.select_related(
            'rulebook', 'rulebook__dnd_edition',
        ).filter(
            skill=skill,
        ).order_by('-rulebook__dnd_edition__core', '-rulebook__published')[0]
    except Exception:
        # [0] raised IndexError: the skill has no variants at all
        primary_variant = None
    # if rulebook is supplied, select find this variant
    if rulebook_slug and rulebook_id:
        # use canonical link in head as this is more or less duplicated content
        use_canonical_link = True
        selected_variant = get_object_or_404(
            SkillVariant.objects.select_related(
                'rulebook', 'skill', 'rulebook__dnd_edition'),
            skill__slug=skill_slug,
            rulebook__pk=rulebook_id)
        # possible malformed/changed slug
        if rulebook_slug != selected_variant.rulebook.slug:
            return permanent_redirect_object(request, selected_variant)
        # selected variant is primary! Redirect to canonical url
        if selected_variant == primary_variant:
            return permanent_redirect_view(
                request, skill_detail, kwargs={
                    'skill_slug': skill_slug}
            )
    else:
        # this is canonical, no need to specify it
        use_canonical_link = False
        selected_variant = primary_variant
    # every other printing of this skill, offered as alternative links
    other_variants = [
        variant
        for variant
        in skill .skillvariant_set.select_related(
            'rulebook', 'rulebook__dnd_edition', 'skill') .all()
        if variant != selected_variant
    ]
    if selected_variant:
        display_3e_warning = is_3e_edition(selected_variant.rulebook.dnd_edition)
    else:
        display_3e_warning = False
    # feats that list this skill as a prerequisite
    feat_list = skill.required_by_feats.select_related('rulebook').all()
    feat_paginator = DndPaginator(feat_list, request)
    return render_to_response('dnd/skill_detail.html',
                              {'skill': skill,
                               'feat_list': feat_paginator.items(),
                               'feat_paginator': feat_paginator,
                               'request': request,
                               'i_like_it_url': request.build_absolute_uri(),
                               'inaccurate_url': request.build_absolute_uri(),
                               'selected_variant': selected_variant,
                               'other_variants': other_variants,
                               'use_canonical_link': use_canonical_link,
                               'display_3e_warning': display_3e_warning,
                               }, context_instance=RequestContext(request),
                              )
def skills_in_rulebook(request, rulebook_slug, rulebook_id):
    """List every skill printed in one rulebook."""
    rulebook = get_object_or_404(Rulebook, pk=rulebook_id)
    # stale/misspelled slug in the URL: redirect to the canonical one
    if rulebook.slug != rulebook_slug:
        return permanent_redirect_view(request, 'skills_in_rulebook',
                                       kwargs={'rulebook_slug': rulebook.slug,
                                               'rulebook_id': rulebook_id, })
    skill_list = [variant.skill for variant in rulebook.skillvariant_set.all()]
    context = {
        'rulebook': rulebook,
        'skill_list': skill_list,
        'request': request,
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/skill_in_rulebook.html', context,
                              context_instance=RequestContext(request))
def monster_index(request):
    """Filterable, paginated index of all monsters."""
    monster_filter = MonsterFilter(request.GET, queryset=Monster.objects.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school').distinct())
    paginator = DndPaginator(monster_filter.qs, request)
    # Presence of the 'name' GET parameter marks the filter form as used.
    form_submitted = 1 if 'name' in request.GET else 0
    context = {
        'request': request,
        'monster_list': paginator.items(),
        'paginator': paginator,
        'filter': monster_filter,
        'form_submitted': form_submitted,
    }
    return render_to_response('dnd/monster_index.html', context,
                              context_instance=RequestContext(request))
def monster_list_by_rulebook(request):
    """Paginated list of rulebooks, used to browse monsters per book."""
    rulebooks = Rulebook.objects.select_related('dnd_edition').all()
    paginator = DndPaginator(rulebooks, request)
    context = {
        'request': request,
        'rulebook_list': paginator.items(),
        'paginator': paginator,
    }
    return render_to_response('dnd/monster_list_by_rulebook.html', context,
                              context_instance=RequestContext(request))
def monsters_in_rulebook(request, rulebook_slug, rulebook_id):
    """Paginated list of every monster printed in one rulebook."""
    rulebook = get_object_or_404(Rulebook, pk=rulebook_id)
    # stale/misspelled slug in the URL: redirect to the canonical one
    if rulebook.slug != rulebook_slug:
        return permanent_redirect_view(request, 'monsters_in_rulebook',
                                       kwargs={'rulebook_slug': rulebook.slug,
                                               'rulebook_id': rulebook_id, })
    monsters = rulebook.monster_set.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school').all()
    paginator = DndPaginator(monsters, request)
    context = {
        'rulebook': rulebook,
        'monster_list': paginator.items(),
        'paginator': paginator,
        'request': request,
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/monsters_in_rulebook.html', context,
                              context_instance=RequestContext(request))
def monster_detail(request, rulebook_slug, rulebook_id, monster_slug, monster_id):
    """Show a single monster.

    Redirects permanently to the canonical URL when any of the slugs or the
    rulebook id in the requested URL does not match the stored record.
    """
    monster = get_object_or_404(
        Monster.objects.select_related('rulebook', 'rulebook__dnd_edition', 'size',
                                       'type', ),
        pk=monster_id)
    if (monster.slug != monster_slug or
            unicode(monster.rulebook.id) != rulebook_id or
            monster.rulebook.slug != rulebook_slug):
        return permanent_redirect_view(request, 'monster_detail',
                                       kwargs={
                                           'rulebook_slug': monster.rulebook.slug,
                                           'rulebook_id': monster.rulebook.id,
                                           'monster_slug': monster.slug,
                                           'monster_id': monster.id, })
    # (a dead `if False: monster = Monster()` IDE-typing shim was removed here)
    monster_speeds = monster.monsterspeed_set.select_related('type', ).all()
    monster_subtypes = monster.subtypes.all()
    monster_skills = monster.skills.select_related('skill').all()
    monster_feats = monster.feats.select_related('feat', 'feat__rulebook').all()
    return render_to_response('dnd/monster_detail.html',
                              {'monster': monster,
                               'rulebook': monster.rulebook,
                               'request': request,
                               'monster_speeds': monster_speeds,
                               'monster_subtypes': monster_subtypes,
                               'monster_skills': monster_skills,
                               'monster_feats': monster_feats,
                               'i_like_it_url': request.build_absolute_uri(),
                               'inaccurate_url': request.build_absolute_uri(),
                               'display_3e_warning': is_3e_edition(monster.rulebook.dnd_edition),
                               }, context_instance=RequestContext(request),
                              )
def race_index(request):
    """Filterable, paginated index of all races."""
    race_filter = RaceFilter(request.GET, queryset=Race.objects.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school').distinct())
    paginator = DndPaginator(race_filter.qs, request)
    # Presence of the 'name' GET parameter marks the filter form as used.
    form_submitted = 1 if 'name' in request.GET else 0
    context = {
        'request': request,
        'race_list': paginator.items(),
        'paginator': paginator,
        'filter': race_filter,
        'form_submitted': form_submitted,
    }
    return render_to_response('dnd/race_index.html', context,
                              context_instance=RequestContext(request))
def race_list_by_rulebook(request):
    """Paginated list of rulebooks, used to browse races per book."""
    rulebooks = Rulebook.objects.select_related('dnd_edition').all()
    paginator = DndPaginator(rulebooks, request)
    context = {
        'request': request,
        'rulebook_list': paginator.items(),
        'paginator': paginator,
    }
    return render_to_response('dnd/race_list_by_rulebook.html', context,
                              context_instance=RequestContext(request))
def races_in_rulebook(request, rulebook_slug, rulebook_id):
    """Paginated list of every race printed in one rulebook."""
    rulebook = get_object_or_404(Rulebook, pk=rulebook_id)
    # stale/misspelled slug in the URL: redirect to the canonical one
    if rulebook.slug != rulebook_slug:
        return permanent_redirect_view(request, 'races_in_rulebook',
                                       kwargs={'rulebook_slug': rulebook.slug,
                                               'rulebook_id': rulebook_id, })
    races = rulebook.race_set.select_related(
        'rulebook', 'rulebook__dnd_edition', 'school').all()
    paginator = DndPaginator(races, request)
    context = {
        'rulebook': rulebook,
        'race_list': paginator.items(),
        'paginator': paginator,
        'request': request,
        'display_3e_warning': is_3e_edition(rulebook.dnd_edition),
    }
    return render_to_response('dnd/races_in_rulebook.html', context,
                              context_instance=RequestContext(request))
def race_detail(request, rulebook_slug, rulebook_id, race_slug, race_id):
    """Show a single race; redirect to the canonical URL on any mismatch."""
    race = get_object_or_404(
        Race.objects.select_related('rulebook', 'rulebook__dnd_edition', 'size', 'base_monster'),
        pk=race_id)
    url_is_canonical = (race.slug == race_slug and
                        unicode(race.rulebook.id) == rulebook_id and
                        race.rulebook.slug == rulebook_slug)
    if not url_is_canonical:
        return permanent_redirect_view(request, 'race_detail',
                                       kwargs={
                                           'rulebook_slug': race.rulebook.slug,
                                           'rulebook_id': race.rulebook.id,
                                           'race_slug': race.slug,
                                           'race_id': race.id, })
    race_speeds = race.racespeed_set.select_related('type', ).all()
    # Races without their own speed entries fall back to the base monster's.
    base_monster_race_speeds = None
    if not race_speeds and race.base_monster:
        base_monster_race_speeds = race.base_monster.monsterspeed_set.select_related('type', ).all()
    favored_classes = race.favored_classes.select_related('character_class', ).all()
    context = {
        'race': race,
        'rulebook': race.rulebook,
        'request': request,
        'race_speeds': race_speeds,
        'base_monster_race_speeds': base_monster_race_speeds,
        'favored_classes': favored_classes,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(),
        'display_3e_warning': is_3e_edition(race.rulebook.dnd_edition),
    }
    return render_to_response('dnd/race_detail.html', context,
                              context_instance=RequestContext(request))
def item_index(request):
    """Filterable, paginated index of all items."""
    item_filter = ItemFilter(request.GET, queryset=Item.objects.select_related(
        'rulebook', 'rulebook__dnd_edition').distinct())
    paginator = DndPaginator(item_filter.qs, request)
    # Presence of the 'name' GET parameter marks the filter form as used.
    form_submitted = 1 if 'name' in request.GET else 0
    context = {
        'request': request,
        'item_list': paginator.items(),
        'paginator': paginator,
        'filter': item_filter,
        'form_submitted': form_submitted,
    }
    return render_to_response('dnd/item_index.html', context,
                              context_instance=RequestContext(request))
def item_detail(request, rulebook_slug, rulebook_id, item_slug, item_id):
    """Show a single item; redirect to the canonical URL on any mismatch.

    When the rulebook gives no explicit cost-to-create and the item has a
    flat gp price, the standard formula is applied: half the price in gp,
    1/25 of the price in XP, one day per 1000 gp.  Bonus-priced items show
    "Varies".
    """
    item = get_object_or_404(Item.objects.select_related(
        'rulebook', 'rulebook__dnd_edition', 'body_slot', 'aura', 'spellschool_set',
        'activation', 'required_feats', 'required_spells', 'property', 'synergy_prerequisite',
    ), pk=item_id)
    # (removed `assert isinstance(item, Item)` — dev-only hint, stripped under -O)
    if (item.slug != item_slug or
            unicode(item.rulebook.id) != rulebook_id or
            item.rulebook.slug != rulebook_slug):
        return permanent_redirect_view(
            request, 'item_detail', kwargs={
                'rulebook_slug': item.rulebook.slug,
                'rulebook_id': item.rulebook.id,
                'item_slug': item.slug,
                'item_id': item_id,
            }
        )
    required_feats = item.required_feats.select_related('rulebook').all()
    required_spells = item.required_spells.select_related('rulebook').all()
    cost_to_create = item.cost_to_create
    # calculate CTC when the rulebook does not state one explicitly
    if not cost_to_create:
        if item.price_gp and not item.price_bonus:
            cost_to_create = "%s gp, %s XP, %d day(s)" % (
                int_with_commas(ceil(item.price_gp / 2.0)),
                int_with_commas(ceil(item.price_gp / 25.0)),
                ceil(item.price_gp / 1000.0))
        elif not item.price_gp and item.price_bonus:
            cost_to_create = "Varies"
    return render_to_response('dnd/item_detail.html',
                              {'item': item,
                               'aura_schools': item.aura_schools.all(),
                               'required_feats': required_feats,
                               'required_spells': required_spells,
                               'cost_to_create': cost_to_create,
                               'rulebook': item.rulebook,
                               'request': request,
                               # enum
                               'ItemType': Item.ItemType,
                               'i_like_it_url': request.build_absolute_uri(),
                               'inaccurate_url': request.build_absolute_uri(),
                               'display_3e_warning': is_3e_edition(item.rulebook.dnd_edition),
                               },
                              context_instance=RequestContext(request),
                              )
def contact(request):
    """Contact form: validate, e-mail the site owners, redirect on success."""
    if request.method == 'POST':
        form = ContactForm(request.POST,
                           initial={'captcha': request.META['REMOTE_ADDR']})
        if form.is_valid():
            sender = form.cleaned_data['sender']
            # Reply-To only when the visitor supplied an address.
            headers = {'Reply-To': sender} if sender else {}
            EmailMessage(
                subject=form.cleaned_data['subject'],
                body="%s\n\nfrom: %s" % (form.cleaned_data['message'], sender),
                from_email='mailer@dndtools.eu',
                to=('dndtoolseu@googlegroups.com', 'dndtools.eu@gmail.com'),
                headers=headers,
            ).send()
            # Redirect after POST
            return HttpResponseRedirect(reverse('contact_sent'))
    else:
        form = ContactForm()  # An unbound form
    # request context required for CSRF
    return render_to_response('dnd/contact.html',
                              {'request': request, 'form': form},
                              context_instance=RequestContext(request))
def contact_sent(request):
    """Thank-you page shown after the contact form was submitted."""
    return render_to_response('dnd/contact_sent.html',
                              {'request': request},
                              context_instance=RequestContext(request))
def inaccurate_content(request):
    """Report-a-problem form; e-mails details about the offending URL."""
    if request.method == 'POST':
        form = InaccurateContentForm(request.POST, initial={
            'captcha': request.META['REMOTE_ADDR']})
        if form.is_valid():
            sender = form.cleaned_data['sender']
            # Reply-To only when the visitor supplied an address.
            headers = {'Reply-To': sender} if sender else {}
            EmailMessage(
                subject='Problem in url %s' % form.cleaned_data['url'],
                body="Message: %s\n\nUrl: %s\n\nBetter desc:%s\nFrom: %s" % (
                    form.cleaned_data['message'], form.cleaned_data['url'],
                    form.cleaned_data['better_description'], sender),
                from_email='mailer@dndtools.eu',
                to=('dndtoolseu@googlegroups.com', 'dndtools.eu@gmail.com'),
                headers=headers,
            ).send()
            # Redirect after POST
            return HttpResponseRedirect(reverse('inaccurate_content_sent'))
    else:
        # Pre-fill the reported URL from the link that brought the user here.
        form = InaccurateContentForm(
            initial={'url': request.GET.get('url', ''),
                     })
    return render_to_response('dnd/inaccurate_content.html',
                              {'request': request, 'form': form},
                              context_instance=RequestContext(request))
def inaccurate_content_sent(request):
    """Thank-you page shown after an inaccuracy report was submitted."""
    return render_to_response('dnd/inaccurate_content_sent.html',
                              {'request': request},
                              context_instance=RequestContext(request))
def staff(request):
    """Render the static 'staff' page stored in the StaticPage table."""
    # NOTE(review): filter(...)[0] raises IndexError (HTTP 500) if the row
    # is missing — confirm whether a 404 would be preferable.
    body = StaticPage.objects.filter(name='staff')[0]
    return render_to_response('dnd/staff.html',
                              {'request': request, 'page_body': body},
                              context_instance=RequestContext(request))
@revision.create_on_success
def very_secret_url(request):
    """Maintenance endpoint used for one-off data-migration scripts.

    Migration code is pasted in ad hoc when needed; at the moment it only
    records an empty log under an automatic revision.  (A large block of
    commented-out PHB spell-page import code was removed.)
    """
    log = ''
    #noinspection PyUnresolvedReferences
    revision.comment = "Automatic (updating PHB spell pages)"
    #noinspection PyUnresolvedReferences
    revision.user = User.objects.get(username='dndtools')
    return render_to_response('dnd/very_secret_url.html',
                              {'request': request,
                               'log': log,
                               }, context_instance=RequestContext(request), )
def rule_detail(request, rulebook_slug, rulebook_id, rule_slug, rule_id):
    """Show a single rule; redirect to the canonical URL on any mismatch."""
    rule = get_object_or_404(
        Rule.objects.select_related('rulebook', 'rulebook__dnd_edition'),
        pk=rule_id)
    url_is_canonical = (rule.slug == rule_slug and
                        unicode(rule.rulebook.id) == rulebook_id and
                        rule.rulebook.slug == rulebook_slug)
    if not url_is_canonical:
        return permanent_redirect_view(request, 'rule_detail',
                                       kwargs={
                                           'rulebook_slug': rule.rulebook.slug,
                                           'rulebook_id': rule.rulebook.id,
                                           'rule_slug': rule.slug,
                                           'rule_id': rule.id, })
    context = {
        'rule': rule,
        'rulebook': rule.rulebook,
        'request': request,
        'i_like_it_url': request.build_absolute_uri(),
        'inaccurate_url': request.build_absolute_uri(),
        'display_3e_warning': is_3e_edition(rule.rulebook.dnd_edition),
    }
    return render_to_response('dnd/rule_detail.html', context,
                              context_instance=RequestContext(request))
|
UTF-8
|
Python
| false | false | 2,013 |
5,446,018,547,124 |
56e275fca12a640fbba71d31bfa6020021f3ae78
|
a3eddd6693bfa81e28104ff42721e92c310a08b0
|
/src/nintendo_gw_web_shop/webshop/urls.py
|
990b8db3cd2f2f3820c8a14cf4b1bc3b568a672c
|
[
"MIT"
] |
permissive
|
mplaine/nintendo-gw-web-shop
|
https://github.com/mplaine/nintendo-gw-web-shop
|
61889ddc1a87a103ff5a6cfad7a6cdcccba4609e
|
097f291ff8f5869aea63a81124742fa02f4c8723
|
refs/heads/master
| 2021-01-10T21:14:11.013204 | 2011-09-08T19:22:02 | 2011-09-08T19:22:02 | 32,458,278 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls.defaults import *
from django.contrib import admin
import settings
admin.autodiscover()
# URLconf for the webshop app: maps request paths to view callables.
urlpatterns = patterns( '',
    # Landing / browsing
    ( r'^$', 'webshop.views.root' ),
    ( r'^home/$', 'webshop.views.home' ),
    ( r'^home/category/(?P<type_id>\d+)/', 'webshop.views.category' ),
    ( r'^home/search$', 'webshop.views.search' ),
    # Authentication and password reset
    ( r'^register/$', 'webshop.views.register' ),
    ( r'^login/$', 'webshop.views.login' ),
    ( r'^passwordreset/$', 'webshop.views.password_reset' ),
    ( r'^passwordreset/requestnewpassword/$', 'webshop.views.request_new_password' ), # Request new password
    ( r'^passwordreset/newpasswordrequested/$', 'webshop.views.new_password_requested' ), # New password requested
    ( r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm', {'template_name' : 'webshop/passwordreset/new_password.html', 'post_reset_redirect': '/webshop/passwordreset/newpasswordset/' } ), # New Password
    ( r'^passwordreset/newpasswordset/$', 'webshop.views.new_password_set' ), # New password set
    ( r'^logout/$', 'webshop.views.logout' ),
    # Customer account pages
    ( r'^myaccount/$', 'webshop.views.myaccount' ),
    ( r'^myaccount/accountdetails/$', 'webshop.views.account_details' ),
    ( r'^myaccount/changepassword/$', 'webshop.views.change_password' ),
    ( r'^myaccount/addressbook/$', 'webshop.views.address_book' ),
    ( r'^myaccount/addressbook/new/$', 'webshop.views.address_book_new' ),
    ( r'^myaccount/addressbook/(?P<address_id>\d+)/edit/$', 'webshop.views.address_book_edit' ),
    ( r'^myaccount/addressbook/(?P<address_id>\d+)/delete/$', 'webshop.views.address_book_delete' ),
    ( r'^myaccount/completedorders/$', 'webshop.views.completed_orders' ),
    # Shop administration
    ( r'^admin/$', 'webshop.views.admin' ),
    ( r'^admin/paidorders/$', 'webshop.views.admin_paid_orders' ),
    ( r'^admin/deliveredorders/$', 'webshop.views.admin_delivered_orders' ),
    ( r'^admin/statistics/$', 'webshop.views.admin_statistics' ),
    # Static info pages
    ( r'^about/$', 'webshop.views.about' ),
    ( r'^credits/$', 'webshop.views.credits' ),
    # Shopping cart
    ( r'^cart/$', 'webshop.views.cart' ),
    ( r'^cart/add/$', 'webshop.views.add_to_cart' ),
    ( r'^cart/update$', 'webshop.views.update_cart' ),
    ( r'^cart/empty/$', 'webshop.views.empty_cart' ),
    # Payment flow callbacks
    ( r'^payment/pay$', 'webshop.views.payment_pay'),
    ( r'^payment/confirm$', 'webshop.views.payment_confirm'),
    ( r'^payment/success$', 'webshop.views.payment_success'),
    ( r'^payment/cancel$', 'webshop.views.payment_cancel'),
    ( r'^payment/error$', 'webshop.views.payment_error'),
    #Ajax urls
    ( r'^ajax/product/(?P<product_id>\d+)/rating/$', 'webshop.views.rating'),
    ( r'^ajax/product/(?P<product_id>\d+)/comment/$', 'webshop.views.comment'),
    ( r'^ajax/products/$', 'webshop.views.ajaxproducts'),
    ( r'^ajax/statistics/commentcount/$', 'webshop.views.ajaxstatisticscommentcount'),
    ( r'^ajax/product/(?P<product_id>\d+)/view/$', 'webshop.views.addproductview'),
    ( r'^ajax/product/view/$', 'webshop.views.productview'),
    ( r'^ajax/comment/(?P<comment_id>\d+)/delete/$', 'webshop.views.commentdelete'),
    # Static files
    ( r'^static/(?P<path>.*)$', 'django.views.static.serve', { 'document_root' : settings.MEDIA_ROOT } ),
)
|
UTF-8
|
Python
| false | false | 2,011 |
8,177,617,752,114 |
6c50b5cb87368678b9c1c566821f2355fe9526be
|
046cafbfb4b2d025b61c30aa5805f134a51c7b7b
|
/ljn/ui/component/ArticleList.py
|
6f337f8ccabf89e89a5c6f117fc9b64a04fc68c8
|
[] |
no_license
|
leojay/ljn
|
https://github.com/leojay/ljn
|
d9834b50a567fd7f2e1f72fe64735e73ee092c58
|
322c220bccb0108410b02601b34a8f7741904823
|
refs/heads/master
| 2019-01-01T05:39:06.623174 | 2012-03-19T09:12:35 | 2012-03-19T09:12:35 | 3,626,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#coding:utf8
from PyQt4.QtCore import pyqtSignal
from PyQt4.QtGui import QListWidget, QListWidgetItem, QInputDialog, QMessageBox
from ljn.Model import Article, Category
from ljn.Repository import get_session
from ljn.ui.UiUtil import create_widget_action
from ljn.ui.component import ArticleEditor
class ArticleItem(QListWidgetItem):
    """List-widget item that displays an article's title and keeps a
    reference to the backing Article model object."""
    def __init__(self, article):
        QListWidgetItem.__init__(self, article.title)
        # Keep the model object so selection handlers can reach it directly.
        self.article = article
class ArticleList(QListWidget):
    """List widget showing the articles of one category.

    Shortcuts: F2 renames, Del deletes (with confirmation), Ctrl+N creates.
    Emits onArticleCreated(article_id) after a successful creation.
    """
    onArticleCreated = pyqtSignal(int)
    def __init__(self, parent):
        QListWidget.__init__(self, parent)
        # No category selected yet; update_articles() is a no-op until set.
        self.category_id = None
        self.update_articles()
        self.addAction(create_widget_action(self, "F2", self._rename_article))
        self.addAction(create_widget_action(self, "Del", self._del_article))
        self.addAction(create_widget_action(self, "CTRL+N", self._new_article))
    def update_articles(self, category_id=None):
        """Reload the list for *category_id* (or the remembered category)."""
        self.clear()
        # '..' acts as a navigate-up entry.
        self.addItem('..')
        if category_id is None:
            category_id = self.category_id
        if category_id is None:
            return
        self.category_id = category_id
        category = Category.find_by_id(get_session(), category_id)
        if category is not None:
            for a in category.articles:
                self.addItem(ArticleItem(a))
    def _new_article(self):
        """Create a new article via the editor dialog and persist it."""
        article = ArticleEditor.create_new_article(self)
        if article is None:
            # user cancelled the dialog
            return
        article.category_id = self.category_id
        s = get_session()
        s.add(article)
        s.commit()
        self.update_articles()
        self.onArticleCreated.emit(article.id)
    def _rename_article(self):
        """Prompt for a new title for the selected article and save it."""
        items = self.selectedItems()
        if not items:
            return
        article = items[0].article
        text, result = QInputDialog.getText(self, 'Rename article', 'New article title:', text=article.title)
        if not result:
            # dialog cancelled
            return
        text = str(text)
        if not text or text == article.title:
            # empty or unchanged title: nothing to do
            return
        s = get_session()
        a = Article.find_by_id(s, article.id)
        a.title = text
        s.commit()
        self.update_articles()
    def _del_article(self):
        """Delete the selected article after a yes/no confirmation."""
        items = self.selectedItems()
        if not items:
            return
        article = items[0].article
        msg = 'Delete "%s"?' % article.title
        btn = QMessageBox.question(self, 'Delete article', msg, QMessageBox.Yes | QMessageBox.No)
        if btn == QMessageBox.No:
            return
        s = get_session()
        s.delete(Article.find_by_id(s, article.id))
        s.commit()
        self.update_articles()
|
UTF-8
|
Python
| false | false | 2,012 |
5,188,320,528,945 |
6ae72d569eca6428c75533ecd4d2ccff796c0444
|
0b842bcb3bf20e1ce628d39bf7e11abd7699baf9
|
/oscar/include/a/elwood/report/reports_pb2.py
|
c96b54e601f43998b59cfb06f4d02f321a70c0f2
|
[] |
no_license
|
afeset/miner2-tools
|
https://github.com/afeset/miner2-tools
|
75cc8cdee06222e0d81e39a34f621399e1ceadee
|
81bcc74fe7c0ca036ec483f634d7be0bab19a6d0
|
refs/heads/master
| 2016-09-05T12:50:58.228698 | 2013-08-27T21:09:56 | 2013-08-27T21:09:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='include/a/elwood/report/reports.proto',
package='a.elwood.report',
serialized_pb='\n%include/a/elwood/report/reports.proto\x12\x0f\x61.elwood.report\x1a&include/a/infra/prov/definitions.proto\x1a include/a/infra/net/packet.proto\x1a$include/a/elwood/client/client.proto\"J\n\x10ReporterSetupGpb\x12\x1e\n\x16maxNumOfReportsInQueue\x18\x01 \x01(\x04\x12\x16\n\x0e\x61\x63tivateReport\x18\x02 \x01(\x08\"\\\n\x0eTupleReportGpb\x12\x10\n\x08\x63lientIP\x18\x01 \x01(\t\x12\x12\n\nclientPort\x18\x02 \x01(\r\x12\x10\n\x08serverIP\x18\x03 \x01(\t\x12\x12\n\nserverPort\x18\x04 \x01(\r\"Q\n\x10PacketsReportGpb\x12\x13\n\x0bpacketCount\x18\x01 \x01(\x04\x12\x13\n\x0bL2byteCount\x18\x02 \x01(\x04\x12\x13\n\x0bL7byteCount\x18\x03 \x01(\x04\"Q\n\x1dPerAdapterPacketStatisticsGpb\x12\x30\n\x05stats\x18\x01 \x03(\x0b\x32!.a.elwood.report.PacketsReportGpb\"2\n\tTextField\x12\x11\n\tfieldName\x18\x01 \x01(\t\x12\x12\n\nfieldValue\x18\x02 \x01(\t\"<\n\nRequestGpb\x12\x0b\n\x03url\x18\x01 \x01(\t\x12!\n\x06header\x18\x02 \x01(\x0b\x32\x11.a.net.PayloadGpb\"\x9d\x03\n\x0bResponseGpb\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.a.net.PayloadGpb\x12\x14\n\x0cresponseCode\x18\x02 \x01(\x03\x12\x16\n\x0e\x63hecksumResult\x18\x03 \x01(\x04\x12\x16\n\x0e\x63hecksumLength\x18\x04 \x01(\x04\x12\x17\n\x08wasSaved\x18\x05 \x01(\x08:\x05\x66\x61lse\x12 \n\x11wasAlreadyExisted\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x08\x66ileName\x18\x07 \x01(\t\x12\"\n\x13wasProgressAnalyzed\x18\x08 \x01(\x08:\x05\x66\x61lse\x12<\n\x10progressInterval\x18\t \x01(\x0b\x32\".a.elwood.client.SampleIntervalGpb\x12;\n\x0fprogressSamples\x18\n \x03(\x0b\x32\".a.elwood.report.ProgressSampleGpb\x12\x39\n\x0eprogressPauses\x18\x0b \x03(\x0b\x32!.a.elwood.report.ProgressPauseGpb\"n\n\x0eSessionInfoGpb\x12,\n\x07request\x18\x01 \x01(\x0b\x32\x1b.a.elwood.report.RequestGpb\x12.\n\x08response\x18\x02 \x01(\x0b\x32\x1c.a.elwood.report.ResponseGpb\"W\n\x11ProgressSampleGpb\x12\x15\n\rtimeOffsetSec\x18\x01 \x01(\x04\x12\x17\n\x0f\x64ownloadedBytes\x18\x02 \x01(\r\x12\x12\n\ntotalBytes\x18\x03 
\x01(\r\"m\n\x10ProgressPauseGpb\x12\x38\n\rpauseInterval\x18\x01 \x01(\x0b\x32!.a.elwood.client.PauseIntervalGpb\x12\x1f\n\x17\x61\x63tualPauseDurationMsec\x18\x02 \x01(\x04\"\xd6\x01\n\rFlowReportGpb\x12.\n\x05tuple\x18\x01 \x01(\x0b\x32\x1f.a.elwood.report.TupleReportGpb\x12<\n\x11\x63lientPacketStats\x18\x02 \x01(\x0b\x32!.a.elwood.report.PacketsReportGpb\x12<\n\x11serverPacketStats\x18\x03 \x01(\x0b\x32!.a.elwood.report.PacketsReportGpb\x12\x19\n\nwasStopped\x18\x04 \x01(\x08:\x05\x66\x61lse\"\x95\x01\n\x12\x43\x61pReplayReportGpb\x12\x46\n\x0e\x61\x64\x61pterTxStats\x18\x01 \x01(\x0b\x32..a.elwood.report.PerAdapterPacketStatisticsGpb\x12\x1b\n\x13\x44\x65tectedIPAddresses\x18\x02 \x03(\t\x12\x1a\n\x0fnumFlowsStopped\x18\x03 \x01(\x04:\x01\x30\"\x8a\x01\n\x11L7StreamReportGpb\x12-\n\x05\x66lows\x18\x01 \x03(\x0b\x32\x1e.a.elwood.report.FlowReportGpb\x12\x46\n\x0e\x61\x64\x61pterTxStats\x18\x02 \x01(\x0b\x32..a.elwood.report.PerAdapterPacketStatisticsGpb\"\xc2\x01\n\x18LiveConsumptionReportGpb\x12.\n\x05tuple\x18\x01 \x01(\x0b\x32\x1f.a.elwood.report.TupleReportGpb\x12-\n\x04info\x18\x02 \x01(\x0b\x32\x1f.a.elwood.report.SessionInfoGpb\x12\x34\n\x05stats\x18\x03 \x01(\x0b\x32%.a.elwood.client.SessionStatisticsGpb\x12\x11\n\tinterface\x18\x04 \x01(\t\"\xa1\x01\n\x16SniffedPacketReportGpb\x12\'\n\rpacketContent\x18\x01 \x01(\x0b\x32\x10.a.net.PacketGpb\x12\x19\n\x11isIpChecksumValid\x18\x02 \x01(\x08\x12\x1a\n\x12isTcpChecksumValid\x18\x03 \x01(\x08\x12\x11\n\tinterface\x18\x04 \x01(\t\x12\x14\n\x0c\x64\x65layMiliSec\x18\x05 \x01(\x04\"&\n\x0e\x45rrorReportGpb\x12\x14\n\x0c\x65rrorMessage\x18\x01 \x01(\t\"\xf9\x02\n\x12\x43ommonReportHeader\x12\x13\n\x0bsequenceNum\x18\x01 \x01(\x04\x12\x10\n\x08reportID\x18\x02 \x01(\x04\x12\x11\n\tstartTime\x18\x03 \x01(\x04\x12\x18\n\x10\x64urationMicrosec\x18\x04 \x01(\x04\x12\xa0\x01\n\nreportType\x18\x05 \x01(\x0e\x32..a.elwood.report.CommonReportHeader.ReportType:\x05kNoneBU\x8a\xb5\x18Q1=None, 2=Cap Replay, 3=L7 Stream, 
4=Live Consumption, 5=Sniffed Packet , 6=Error\"l\n\nReportType\x12\t\n\x05kNone\x10\x01\x12\x0e\n\nkCapReplay\x10\x02\x12\r\n\tkL7Stream\x10\x03\x12\x14\n\x10kLiveConsumption\x10\x04\x12\x12\n\x0ekSniffedPacket\x10\x05\x12\n\n\x06kError\x10\x06\"\x86\x03\n\x0f\x45lwoodReportGpb\x12\x33\n\x06header\x18\x01 \x01(\x0b\x32#.a.elwood.report.CommonReportHeader\x12<\n\x0f\x63\x61pReplayReport\x18\x02 \x01(\x0b\x32#.a.elwood.report.CapReplayReportGpb\x12:\n\x0eL7StreamReport\x18\x03 \x01(\x0b\x32\".a.elwood.report.L7StreamReportGpb\x12H\n\x15LiveConsumptionReport\x18\x04 \x01(\x0b\x32).a.elwood.report.LiveConsumptionReportGpb\x12\x44\n\x13sniffedPacketReport\x18\x05 \x01(\x0b\x32\'.a.elwood.report.SniffedPacketReportGpb\x12\x34\n\x0b\x45rrorReport\x18\x06 \x01(\x0b\x32\x1f.a.elwood.report.ErrorReportGpb\"H\n\x14\x45lwoodReportBatchGpb\x12\x30\n\x06report\x18\x01 \x03(\x0b\x32 .a.elwood.report.ElwoodReportGpb\"B\n\x1eReportBatchRetrievalRequestGpb\x12 \n\x15maxNumOfReportsToSend\x18\x01 \x01(\r:\x01\x31')
_COMMONREPORTHEADER_REPORTTYPE = descriptor.EnumDescriptor(
name='ReportType',
full_name='a.elwood.report.CommonReportHeader.ReportType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='kNone', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='kCapReplay', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='kL7Stream', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='kLiveConsumption', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='kSniffedPacket', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='kError', index=5, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2529,
serialized_end=2637,
)
_REPORTERSETUPGPB = descriptor.Descriptor(
name='ReporterSetupGpb',
full_name='a.elwood.report.ReporterSetupGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='maxNumOfReportsInQueue', full_name='a.elwood.report.ReporterSetupGpb.maxNumOfReportsInQueue', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='activateReport', full_name='a.elwood.report.ReporterSetupGpb.activateReport', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=170,
serialized_end=244,
)
_TUPLEREPORTGPB = descriptor.Descriptor(
name='TupleReportGpb',
full_name='a.elwood.report.TupleReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='clientIP', full_name='a.elwood.report.TupleReportGpb.clientIP', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='clientPort', full_name='a.elwood.report.TupleReportGpb.clientPort', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='serverIP', full_name='a.elwood.report.TupleReportGpb.serverIP', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='serverPort', full_name='a.elwood.report.TupleReportGpb.serverPort', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=246,
serialized_end=338,
)
_PACKETSREPORTGPB = descriptor.Descriptor(
name='PacketsReportGpb',
full_name='a.elwood.report.PacketsReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='packetCount', full_name='a.elwood.report.PacketsReportGpb.packetCount', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='L2byteCount', full_name='a.elwood.report.PacketsReportGpb.L2byteCount', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='L7byteCount', full_name='a.elwood.report.PacketsReportGpb.L7byteCount', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=340,
serialized_end=421,
)
_PERADAPTERPACKETSTATISTICSGPB = descriptor.Descriptor(
name='PerAdapterPacketStatisticsGpb',
full_name='a.elwood.report.PerAdapterPacketStatisticsGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='stats', full_name='a.elwood.report.PerAdapterPacketStatisticsGpb.stats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=423,
serialized_end=504,
)
_TEXTFIELD = descriptor.Descriptor(
name='TextField',
full_name='a.elwood.report.TextField',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='fieldName', full_name='a.elwood.report.TextField.fieldName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fieldValue', full_name='a.elwood.report.TextField.fieldValue', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=506,
serialized_end=556,
)
_REQUESTGPB = descriptor.Descriptor(
name='RequestGpb',
full_name='a.elwood.report.RequestGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='a.elwood.report.RequestGpb.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='header', full_name='a.elwood.report.RequestGpb.header', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=558,
serialized_end=618,
)
_RESPONSEGPB = descriptor.Descriptor(
name='ResponseGpb',
full_name='a.elwood.report.ResponseGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='header', full_name='a.elwood.report.ResponseGpb.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='responseCode', full_name='a.elwood.report.ResponseGpb.responseCode', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='checksumResult', full_name='a.elwood.report.ResponseGpb.checksumResult', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='checksumLength', full_name='a.elwood.report.ResponseGpb.checksumLength', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='wasSaved', full_name='a.elwood.report.ResponseGpb.wasSaved', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='wasAlreadyExisted', full_name='a.elwood.report.ResponseGpb.wasAlreadyExisted', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fileName', full_name='a.elwood.report.ResponseGpb.fileName', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='wasProgressAnalyzed', full_name='a.elwood.report.ResponseGpb.wasProgressAnalyzed', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='progressInterval', full_name='a.elwood.report.ResponseGpb.progressInterval', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='progressSamples', full_name='a.elwood.report.ResponseGpb.progressSamples', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='progressPauses', full_name='a.elwood.report.ResponseGpb.progressPauses', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=621,
serialized_end=1034,
)
_SESSIONINFOGPB = descriptor.Descriptor(
name='SessionInfoGpb',
full_name='a.elwood.report.SessionInfoGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='request', full_name='a.elwood.report.SessionInfoGpb.request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='response', full_name='a.elwood.report.SessionInfoGpb.response', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1036,
serialized_end=1146,
)
_PROGRESSSAMPLEGPB = descriptor.Descriptor(
name='ProgressSampleGpb',
full_name='a.elwood.report.ProgressSampleGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='timeOffsetSec', full_name='a.elwood.report.ProgressSampleGpb.timeOffsetSec', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='downloadedBytes', full_name='a.elwood.report.ProgressSampleGpb.downloadedBytes', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='totalBytes', full_name='a.elwood.report.ProgressSampleGpb.totalBytes', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1148,
serialized_end=1235,
)
_PROGRESSPAUSEGPB = descriptor.Descriptor(
name='ProgressPauseGpb',
full_name='a.elwood.report.ProgressPauseGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='pauseInterval', full_name='a.elwood.report.ProgressPauseGpb.pauseInterval', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='actualPauseDurationMsec', full_name='a.elwood.report.ProgressPauseGpb.actualPauseDurationMsec', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1237,
serialized_end=1346,
)
_FLOWREPORTGPB = descriptor.Descriptor(
name='FlowReportGpb',
full_name='a.elwood.report.FlowReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='tuple', full_name='a.elwood.report.FlowReportGpb.tuple', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='clientPacketStats', full_name='a.elwood.report.FlowReportGpb.clientPacketStats', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='serverPacketStats', full_name='a.elwood.report.FlowReportGpb.serverPacketStats', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='wasStopped', full_name='a.elwood.report.FlowReportGpb.wasStopped', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1349,
serialized_end=1563,
)
_CAPREPLAYREPORTGPB = descriptor.Descriptor(
name='CapReplayReportGpb',
full_name='a.elwood.report.CapReplayReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='adapterTxStats', full_name='a.elwood.report.CapReplayReportGpb.adapterTxStats', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='DetectedIPAddresses', full_name='a.elwood.report.CapReplayReportGpb.DetectedIPAddresses', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='numFlowsStopped', full_name='a.elwood.report.CapReplayReportGpb.numFlowsStopped', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1566,
serialized_end=1715,
)
_L7STREAMREPORTGPB = descriptor.Descriptor(
name='L7StreamReportGpb',
full_name='a.elwood.report.L7StreamReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='flows', full_name='a.elwood.report.L7StreamReportGpb.flows', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='adapterTxStats', full_name='a.elwood.report.L7StreamReportGpb.adapterTxStats', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1718,
serialized_end=1856,
)
_LIVECONSUMPTIONREPORTGPB = descriptor.Descriptor(
name='LiveConsumptionReportGpb',
full_name='a.elwood.report.LiveConsumptionReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='tuple', full_name='a.elwood.report.LiveConsumptionReportGpb.tuple', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='info', full_name='a.elwood.report.LiveConsumptionReportGpb.info', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='stats', full_name='a.elwood.report.LiveConsumptionReportGpb.stats', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='interface', full_name='a.elwood.report.LiveConsumptionReportGpb.interface', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1859,
serialized_end=2053,
)
_SNIFFEDPACKETREPORTGPB = descriptor.Descriptor(
name='SniffedPacketReportGpb',
full_name='a.elwood.report.SniffedPacketReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='packetContent', full_name='a.elwood.report.SniffedPacketReportGpb.packetContent', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='isIpChecksumValid', full_name='a.elwood.report.SniffedPacketReportGpb.isIpChecksumValid', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='isTcpChecksumValid', full_name='a.elwood.report.SniffedPacketReportGpb.isTcpChecksumValid', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='interface', full_name='a.elwood.report.SniffedPacketReportGpb.interface', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='delayMiliSec', full_name='a.elwood.report.SniffedPacketReportGpb.delayMiliSec', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2056,
serialized_end=2217,
)
_ERRORREPORTGPB = descriptor.Descriptor(
name='ErrorReportGpb',
full_name='a.elwood.report.ErrorReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='errorMessage', full_name='a.elwood.report.ErrorReportGpb.errorMessage', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2219,
serialized_end=2257,
)
_COMMONREPORTHEADER = descriptor.Descriptor(
name='CommonReportHeader',
full_name='a.elwood.report.CommonReportHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='sequenceNum', full_name='a.elwood.report.CommonReportHeader.sequenceNum', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reportID', full_name='a.elwood.report.CommonReportHeader.reportID', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='startTime', full_name='a.elwood.report.CommonReportHeader.startTime', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='durationMicrosec', full_name='a.elwood.report.CommonReportHeader.durationMicrosec', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reportType', full_name='a.elwood.report.CommonReportHeader.reportType', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\212\265\030Q1=None, 2=Cap Replay, 3=L7 Stream, 4=Live Consumption, 5=Sniffed Packet , 6=Error')),
],
extensions=[
],
nested_types=[],
enum_types=[
_COMMONREPORTHEADER_REPORTTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2260,
serialized_end=2637,
)
_ELWOODREPORTGPB = descriptor.Descriptor(
name='ElwoodReportGpb',
full_name='a.elwood.report.ElwoodReportGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='header', full_name='a.elwood.report.ElwoodReportGpb.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='capReplayReport', full_name='a.elwood.report.ElwoodReportGpb.capReplayReport', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='L7StreamReport', full_name='a.elwood.report.ElwoodReportGpb.L7StreamReport', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='LiveConsumptionReport', full_name='a.elwood.report.ElwoodReportGpb.LiveConsumptionReport', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sniffedPacketReport', full_name='a.elwood.report.ElwoodReportGpb.sniffedPacketReport', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='ErrorReport', full_name='a.elwood.report.ElwoodReportGpb.ErrorReport', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2640,
serialized_end=3030,
)
_ELWOODREPORTBATCHGPB = descriptor.Descriptor(
name='ElwoodReportBatchGpb',
full_name='a.elwood.report.ElwoodReportBatchGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='report', full_name='a.elwood.report.ElwoodReportBatchGpb.report', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3032,
serialized_end=3104,
)
_REPORTBATCHRETRIEVALREQUESTGPB = descriptor.Descriptor(
name='ReportBatchRetrievalRequestGpb',
full_name='a.elwood.report.ReportBatchRetrievalRequestGpb',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='maxNumOfReportsToSend', full_name='a.elwood.report.ReportBatchRetrievalRequestGpb.maxNumOfReportsToSend', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3106,
serialized_end=3172,
)
# Cross-file descriptor imports followed by the protoc-generated wiring that
# links each message/enum field to its target Descriptor (forward references
# cannot be resolved inside the Descriptor literals above).  Do not edit.
import include.a.infra.prov.definitions_pb2
import include.a.infra.net.packet_pb2
import include.a.elwood.client.client_pb2
_PERADAPTERPACKETSTATISTICSGPB.fields_by_name['stats'].message_type = _PACKETSREPORTGPB
_REQUESTGPB.fields_by_name['header'].message_type = include.a.infra.net.packet_pb2._PAYLOADGPB
_RESPONSEGPB.fields_by_name['header'].message_type = include.a.infra.net.packet_pb2._PAYLOADGPB
_RESPONSEGPB.fields_by_name['progressInterval'].message_type = include.a.elwood.client.client_pb2._SAMPLEINTERVALGPB
_RESPONSEGPB.fields_by_name['progressSamples'].message_type = _PROGRESSSAMPLEGPB
_RESPONSEGPB.fields_by_name['progressPauses'].message_type = _PROGRESSPAUSEGPB
_SESSIONINFOGPB.fields_by_name['request'].message_type = _REQUESTGPB
_SESSIONINFOGPB.fields_by_name['response'].message_type = _RESPONSEGPB
_PROGRESSPAUSEGPB.fields_by_name['pauseInterval'].message_type = include.a.elwood.client.client_pb2._PAUSEINTERVALGPB
_FLOWREPORTGPB.fields_by_name['tuple'].message_type = _TUPLEREPORTGPB
_FLOWREPORTGPB.fields_by_name['clientPacketStats'].message_type = _PACKETSREPORTGPB
_FLOWREPORTGPB.fields_by_name['serverPacketStats'].message_type = _PACKETSREPORTGPB
_CAPREPLAYREPORTGPB.fields_by_name['adapterTxStats'].message_type = _PERADAPTERPACKETSTATISTICSGPB
_L7STREAMREPORTGPB.fields_by_name['flows'].message_type = _FLOWREPORTGPB
_L7STREAMREPORTGPB.fields_by_name['adapterTxStats'].message_type = _PERADAPTERPACKETSTATISTICSGPB
_LIVECONSUMPTIONREPORTGPB.fields_by_name['tuple'].message_type = _TUPLEREPORTGPB
_LIVECONSUMPTIONREPORTGPB.fields_by_name['info'].message_type = _SESSIONINFOGPB
_LIVECONSUMPTIONREPORTGPB.fields_by_name['stats'].message_type = include.a.elwood.client.client_pb2._SESSIONSTATISTICSGPB
_SNIFFEDPACKETREPORTGPB.fields_by_name['packetContent'].message_type = include.a.infra.net.packet_pb2._PACKETGPB
_COMMONREPORTHEADER.fields_by_name['reportType'].enum_type = _COMMONREPORTHEADER_REPORTTYPE
_COMMONREPORTHEADER_REPORTTYPE.containing_type = _COMMONREPORTHEADER;
_ELWOODREPORTGPB.fields_by_name['header'].message_type = _COMMONREPORTHEADER
_ELWOODREPORTGPB.fields_by_name['capReplayReport'].message_type = _CAPREPLAYREPORTGPB
_ELWOODREPORTGPB.fields_by_name['L7StreamReport'].message_type = _L7STREAMREPORTGPB
_ELWOODREPORTGPB.fields_by_name['LiveConsumptionReport'].message_type = _LIVECONSUMPTIONREPORTGPB
_ELWOODREPORTGPB.fields_by_name['sniffedPacketReport'].message_type = _SNIFFEDPACKETREPORTGPB
_ELWOODREPORTGPB.fields_by_name['ErrorReport'].message_type = _ERRORREPORTGPB
_ELWOODREPORTBATCHGPB.fields_by_name['report'].message_type = _ELWOODREPORTGPB
# ---------------------------------------------------------------------------
# Concrete message classes, one per Descriptor above.  protoc-generated: the
# GeneratedProtocolMessageType metaclass reads DESCRIPTOR and synthesizes the
# message fields and (de)serialization machinery.  Do not edit by hand; the
# @@protoc_insertion_point markers are reserved for protoc plugins.
# ---------------------------------------------------------------------------
class ReporterSetupGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _REPORTERSETUPGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ReporterSetupGpb)
class TupleReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _TUPLEREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.TupleReportGpb)
class PacketsReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PACKETSREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.PacketsReportGpb)
class PerAdapterPacketStatisticsGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PERADAPTERPACKETSTATISTICSGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.PerAdapterPacketStatisticsGpb)
class TextField(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _TEXTFIELD
  # @@protoc_insertion_point(class_scope:a.elwood.report.TextField)
class RequestGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _REQUESTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.RequestGpb)
class ResponseGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _RESPONSEGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ResponseGpb)
class SessionInfoGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _SESSIONINFOGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.SessionInfoGpb)
class ProgressSampleGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PROGRESSSAMPLEGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ProgressSampleGpb)
class ProgressPauseGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PROGRESSPAUSEGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ProgressPauseGpb)
class FlowReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _FLOWREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.FlowReportGpb)
class CapReplayReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CAPREPLAYREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.CapReplayReportGpb)
class L7StreamReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _L7STREAMREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.L7StreamReportGpb)
class LiveConsumptionReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _LIVECONSUMPTIONREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.LiveConsumptionReportGpb)
class SniffedPacketReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _SNIFFEDPACKETREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.SniffedPacketReportGpb)
class ErrorReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ERRORREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ErrorReportGpb)
class CommonReportHeader(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _COMMONREPORTHEADER
  # @@protoc_insertion_point(class_scope:a.elwood.report.CommonReportHeader)
class ElwoodReportGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ELWOODREPORTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ElwoodReportGpb)
class ElwoodReportBatchGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ELWOODREPORTBATCHGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ElwoodReportBatchGpb)
class ReportBatchRetrievalRequestGpb(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _REPORTBATCHRETRIEVALREQUESTGPB
  # @@protoc_insertion_point(class_scope:a.elwood.report.ReportBatchRetrievalRequestGpb)
# @@protoc_insertion_point(module_scope)
|
UTF-8
|
Python
| false | true | 2,013 |
7,808,250,562,531 |
89af27427a7bb482bb12b9c0262a57394c3acb4c
|
04238ed79f7439c8f7842b34ed5e8a01b58ba24d
|
/helpers/mapcanvas.py
|
b2c7ba738b47298f4125b88269c8487fc38f9e45
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"GPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] |
non_permissive
|
jirivrany/riskflow123d-post
|
https://github.com/jirivrany/riskflow123d-post
|
cd81c68d70274dc125c1f5af147145ec0e564ee4
|
f429edf1b324ac6b2912eab90bb118f2f5c436a4
|
refs/heads/master
| 2020-08-06T00:04:30.666697 | 2014-05-22T06:18:40 | 2014-05-22T06:18:40 | 16,613,958 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 28 Nov 2013
@author: albert
'''
from PyQt4.QtGui import QWidget, QSizePolicy, QVBoxLayout
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
import matplotlib.pyplot as plt
from ruzne import value_set
class MapCanvas(QWidget):
    """Standalone Qt widget showing a triangulated concentration map.

    Embeds a matplotlib figure (tripcolor plot of *triangulation*) together
    with the standard matplotlib navigation toolbar.
    """

    def __init__(self, triangulation, options, parent=None):
        super(MapCanvas, self).__init__(parent)
        self.triang = triangulation
        self.first_run = True

        def opt(key, fallback):
            # Use the configured value only when value_set() reports it usable.
            return options[key] if value_set(options, key) else fallback

        self.lab_x = opt('xlabel', 'mesh X coord')
        self.lab_y = opt('ylabel', 'mesh Y coord')
        self.title = opt('title', 'Map of concentrations')
        self.setWindowTitle(self.title)
        self.create_main_frame()
        self.on_draw()

    def create_main_frame(self):
        """Build the figure canvas and toolbar and lay them out vertically."""
        self.fig, self.axes = plt.subplots()
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self)
        self.canvas.setSizePolicy(QSizePolicy.Expanding,
                                  QSizePolicy.Expanding)
        self.canvas.updateGeometry()
        self.mpl_toolbar = NavigationToolbar(self.canvas, self)
        layout = QVBoxLayout()
        layout.addWidget(self.canvas)
        layout.addWidget(self.mpl_toolbar)
        self.setLayout(layout)

    def on_draw(self):
        """Redraw the map from the stored triangulation data."""
        self.axes.clear()
        self.axes.set_aspect('equal')
        mesh = self.axes.tripcolor(self.triang['x_np'],
                                   self.triang['y_np'],
                                   self.triang['triangles'],
                                   facecolors=self.triang['zfaces'],
                                   edgecolors='k')
        self.axes.set_title(self.title)
        self.axes.set_xlabel(self.lab_x)
        self.axes.set_ylabel(self.lab_y)
        # Attach the colorbar only on the first draw; adding it on every
        # redraw would stack additional bars next to the axes.
        if self.first_run:
            self.fig.colorbar(mesh)
            self.first_run = False
        self.canvas.draw()
|
UTF-8
|
Python
| false | false | 2,014 |
14,465,449,886,643 |
19af476876ee2bb43ce9976876a0dbf60a811539
|
b1bb9a66ffe3894af4b1839ab0156c4081c189ee
|
/luhn.py
|
1923838a8d49566c28fdfa2ce1cc3691c2254930
|
[] |
no_license
|
mrich137/luhn
|
https://github.com/mrich137/luhn
|
22a4f47a233f4bc982e0230bb4a4ea304aad4fa4
|
0622e5b29a2a08df17cd87300bd748a18df55808
|
refs/heads/master
| 2020-05-19T08:00:21.966851 | 2014-08-05T20:53:46 | 2014-08-05T20:53:46 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
def luhn(card_number):
    """Return True if *card_number* passes the Luhn check-digit test.

    *card_number* is a string of decimal digits.  Every second digit,
    counted so that the rightmost digit is NOT doubled, is doubled (and
    reduced by 9 when the doubling exceeds 9); the number is valid when
    the resulting digit sum is a multiple of 10.
    """
    total = 0
    # Digits at positions whose parity equals the length's parity are the
    # ones to double (this pins the rightmost digit as un-doubled).
    parity = len(card_number) & 1
    for index, ch in enumerate(card_number):
        digit = int(ch)
        if (index & 1) == parity:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0
def main():
""" Main function for module debug """
cc = raw_input('Enter a credit card number: ')
if(luhn(str(cc))):
print "CC # is VALID!"
else:
print "CC # is INVALID!"
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,014 |
377,957,132,456 |
39b4a4dcf728306163a60d53fba4720cba0b76ef
|
a6d1f5a8b0469efe7faf5177bcee64dbea05d4b4
|
/kamaki/cli/one_command.py
|
377725281d8c0e996d3ef9e31b80565117723f6a
|
[
"BSD-2-Clause"
] |
permissive
|
apyrgio/kamaki
|
https://github.com/apyrgio/kamaki
|
3adb89ae5608549087a6695b6e39abd6f17b0969
|
2632294b688a119646c249a0852496472433826f
|
refs/heads/master
| 2021-01-18T01:22:47.475580 | 2014-02-19T12:54:03 | 2014-02-19T12:54:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Copyright 2012-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.command
from kamaki.cli import (
get_command_group, set_command_params, print_subcommands_help, exec_cmd,
update_parser_help, _groups_help, _load_spec_module,
init_cached_authenticator, kloger)
from kamaki.cli.errors import CLIUnknownCommand
def _get_cmd_tree_from_spec(spec, cmd_tree_list):
    """Return the command tree in *cmd_tree_list* whose name equals *spec*.

    :raises CLIUnknownCommand: when no tree matches *spec*
    """
    for candidate in cmd_tree_list:
        if candidate.name != spec:
            continue
        return candidate
    raise CLIUnknownCommand('Unknown command: %s' % spec)
def _get_best_match_from_cmd_tree(cmd_tree, unparsed):
    """Longest-prefix command lookup over the non-flag terms of *unparsed*.

    Joins the leading non-flag terms with '_' and asks *cmd_tree* for that
    command path, trimming one term at a time until a path resolves.
    Returns the matched command, or None when nothing matches.
    """
    terms = [term for term in unparsed if not term.startswith('-')]
    for end in range(len(terms), 0, -1):
        try:
            return cmd_tree.get_command('_'.join(terms[:end]))
        except KeyError:
            continue
    return None
def run(cloud, parser, _help):
    """Resolve the requested command from the parsed CLI input and execute it.

    *cloud* is the selected cloud name (or None for no authentication),
    *parser* carries the parsed arguments plus the unparsed terms, and
    *_help* forces help output instead of execution.
    """
    group = get_command_group(list(parser.unparsed), parser.arguments)
    if not group:
        # No command group given: print generic help plus the group list.
        #parser.parser.print_help()
        parser.print_help()
        _groups_help(parser.arguments)
        exit(0)
    nonargs = [term for term in parser.unparsed if not term.startswith('-')]
    set_command_params(nonargs)
    # _best_match holds the command path matched so far; module-global so the
    # spec-loading machinery can consult/extend it while modules are imported.
    global _best_match
    _best_match = []
    _cnf = parser.arguments['config']
    # The module implementing a command group is configured as "<group>_cli".
    group_spec = _cnf.get('global', '%s_cli' % group)
    spec_module = _load_spec_module(group_spec, parser.arguments, '_commands')
    if spec_module is None:
        raise CLIUnknownCommand(
            'Could not find specs for %s commands' % group,
            details=[
                'Make sure %s is a valid command group' % group,
                'Refer to kamaki documentation for setting custom command',
                'groups or overide existing ones'])
    cmd_tree = _get_cmd_tree_from_spec(group, spec_module._commands)
    if _best_match:
        cmd = cmd_tree.get_command('_'.join(_best_match))
    else:
        cmd = _get_best_match_from_cmd_tree(cmd_tree, parser.unparsed)
        # NOTE(review): if no command matched, cmd is None and this line would
        # raise AttributeError before the None check below runs -- confirm.
        _best_match = cmd.path.split('_')
    if cmd is None:
        kloger.info('Unexpected error: failed to load command (-d for more)')
        exit(1)
    update_parser_help(parser, cmd)
    if _help or not cmd.is_command:
        # Help was requested, or the path names a command group rather than a
        # runnable command: show help for this node and its subcommands.
        if cmd.cmd_class:
            parser.required = getattr(cmd.cmd_class, 'required', None)
        parser.print_help()
        if getattr(cmd, 'long_help', False):
            print 'Details:\n', cmd.long_help
        print_subcommands_help(cmd)
        exit(0)
    cls = cmd.cmd_class
    # Build a (cached) authenticated client only when a cloud is selected.
    auth_base = init_cached_authenticator(_cnf, cloud, kloger) if (
        cloud) else None
    executable = cls(parser.arguments, auth_base, cloud)
    parser.required = getattr(cls, 'required', None)
    parser.update_arguments(executable.arguments)
    # Strip the matched command terms; whatever remains is the command's own
    # positional arguments and flags.
    for term in _best_match:
        parser.unparsed.remove(term)
    exec_cmd(executable, parser.unparsed, parser.print_help)
|
UTF-8
|
Python
| false | false | 2,014 |
11,201,274,758,797 |
43094bb660e813f1c6941965f5f799087c244a1e
|
3eed51b6fac7b621079a829ce1ad6a1a4eaae0b0
|
/teachers/migrations/0001_initial.py
|
a43d6efac22d5a671a4d265780bd0ef66fdc1f4b
|
[] |
no_license
|
iamjarret/nycteachereval
|
https://github.com/iamjarret/nycteachereval
|
f47f208062568c04b88cf1388cd8403739105afd
|
0457a60b3370746a0ab37c090f00cc6e181ef3c9
|
refs/heads/master
| 2020-06-03T01:13:19.942152 | 2013-03-11T07:39:30 | 2013-03-11T07:39:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: creates the School and Teachers tables.

    Generated by ``schemamigration``; the ``models`` dict below is South's
    frozen snapshot of the ORM at generation time -- do not edit by hand.
    """
    def forwards(self, orm):
        # Adding model 'School'
        db.create_table('teachers_school', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('dbn', self.gf('django.db.models.fields.CharField')(unique=True, max_length=6)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('freelunch', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('ell', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('asian', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('black', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('hisp', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('white', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('male', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('female', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('overall_grade', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
            ('dropout', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
        ))
        db.send_create_signal('teachers', ['School'])
        # Adding model 'Teachers'
        db.create_table('teachers_teachers', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('teacherid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('dbn', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['teachers.School'], to_field='dbn')),
            ('va_0910_eng', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0910_math', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0809_eng', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0809_math', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0708_eng', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0708_math', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0607_eng', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('va_0607_math', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
            ('grade', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
        ))
        db.send_create_signal('teachers', ['Teachers'])
    def backwards(self, orm):
        # Deleting model 'School'
        db.delete_table('teachers_school')
        # Deleting model 'Teachers'
        db.delete_table('teachers_teachers')
    # Frozen ORM snapshot used by South when running this migration.
    models = {
        'teachers.school': {
            'Meta': {'ordering': "['name']", 'object_name': 'School'},
            'asian': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'black': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'dbn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '6'}),
            'dropout': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'ell': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'female': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'freelunch': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'hisp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'male': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'overall_grade': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'white': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'})
        },
        'teachers.teachers': {
            'Meta': {'ordering': "['dbn']", 'object_name': 'Teachers'},
            'dbn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teachers.School']", 'to_field': "'dbn'"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'grade': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'teacherid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'va_0607_eng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0607_math': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0708_eng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0708_math': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0809_eng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0809_math': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0910_eng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
            'va_0910_math': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'})
        }
    }
    complete_apps = ['teachers']
|
UTF-8
|
Python
| false | false | 2,013 |
9,612,136,849,293 |
230ceb9ef45166ed0e670fcb9d46716bbe06b0b0
|
852c6425f4d1bb99a3dd22e65374a4482f368c74
|
/apiconnectors/serializers.py
|
01c27e1a8009ae4e28158345825160e1dee20bb3
|
[] |
no_license
|
CMPE295B/travel-app-backend
|
https://github.com/CMPE295B/travel-app-backend
|
c92641efbe0e2faa5e4282e40e75b890aa63a85e
|
4842cfce9403f3ae376cf879391d8e8eb581e3bb
|
refs/heads/master
| 2021-01-01T15:44:28.905301 | 2014-07-28T19:35:06 | 2014-07-28T19:35:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib.auth.models import User
from .models import HotelBooking
from rest_framework import serializers
class HotelReservationSerializer(serializers.ModelSerializer):
    """Serializes HotelBooking records for the hotel-reservation API."""
    # Exposed as 'agentId', sourced from the related owner's id.
    # NOTE(review): the positional argument to Field() is the source path in
    # DRF 2.x -- confirm the installed rest_framework version matches.
    agentId = serializers.Field('owner.id')
    class Meta:
        model = HotelBooking
        # Explicit whitelist of booking, room, guest and address fields.
        fields = ('hotelId', 'arrivalDate','departureDate','supplierType',
                  'roomTypeCode','rateCode','chargeableRate',
                  'room1','room1FirstName','room1LastName','room1BedTypeId',
                  'room1SmokingPreference','email','firstName',
                  'lastName','city','stateProvinceCode',
                  'countryCode','postalCode','packageId','agentId')
|
UTF-8
|
Python
| false | false | 2,014 |
6,897,717,524,636 |
a93ab46ddfe89cece2341c423e8636073a45c6a6
|
acf6a0fadafc7b9667c1a626706aed48b5824ab2
|
/missionList.py
|
2d1940214b4d3666bba7241778d38414f0d6b198
|
[
"MIT"
] |
permissive
|
nlaurens/SpaceAlert
|
https://github.com/nlaurens/SpaceAlert
|
e7e2de8bb2d43782047c5a178e1a3c7b505f7f9d
|
12b19bf33236a4c023c3e6eea931db1ca35cf551
|
refs/heads/master
| 2020-06-07T08:24:41.978893 | 2013-11-21T18:44:01 | 2013-11-21T18:44:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import ConfigParser
class missionList():
    """Registry of missions loaded from .cfg files.

    ``self.chapter`` maps chapter name -> {mission name -> mission script}:
    each config-file section is a chapter and each option inside it is a
    mission whose value is the mission script.
    """
    def __init__(self):
        self.chapter = {}
        #TODO
        #Scan for all cfg files in the mission dir
        #TMP solution, should be replaced with all .cfg files from 'mission' dir.
        missionConfigs = ['duckling.cfg', 'LittleDuckling.cfg']
        for configFile in missionConfigs:
            self.parseConfigFile('missions/' + configFile)
        #TODO: CHECK IF ALL MISSIONS ARE PLAYBLE.
    def parseConfigFile(self, configFile):
        """Parse one config file and merge its sections into self.chapter.

        @param configFile: path of the .cfg file to read
        """
        config = ConfigParser.RawConfigParser()
        config.read(configFile)
        for section in config.sections():
            # One chapter per section; its missions are the section options.
            self.chapter[section] = {
                option: config.get(section, option)
                for option in config.options(section)}
    def getChapters(self):
        """Return the chapter names as a sorted list."""
        # sorted() over the dict iterates its keys directly -- no need to
        # build an intermediate list and sort it in place.
        return sorted(self.chapter)
    def getMissions(self, chapter):
        """Return the mission names of *chapter* as a sorted list.

        @param chapter: chapter name (KeyError when unknown)
        """
        return sorted(self.chapter[chapter])
    def getScript(self, chapter, mission):
        """Return the script string for *mission* in *chapter*.

        @param chapter: chapter name
        @param mission: mission name within that chapter
        """
        return self.chapter[chapter][mission]
|
UTF-8
|
Python
| false | false | 2,013 |
283,467,852,776 |
a5c58c554fc2b8da15c6a7851ee987b6c2e3def4
|
984c872ef059732e846cec8524a5c74c34bb952e
|
/run/ecryptfs2john.py
|
955101f0f6245119e9e534df7667772ab6784bcb
|
[] |
no_license
|
xchwarze/JohnTheRipper
|
https://github.com/xchwarze/JohnTheRipper
|
5f40c0f459562c7588b5ed2c498c78ba6f7cc63b
|
8e0b2f6175805edfb1cd8cadf9af28a04eec6137
|
HEAD
| 2018-01-15T13:24:26.828348 | 2014-10-25T18:56:25 | 2014-10-25T18:57:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import sys
def process_file(filename):
    """Print a John-the-Ripper hash line for one wrapped-passphrase file.

    Reads the first 16 characters of *filename*; files shorter than that
    are silently skipped.
    """
    with open(filename, "r") as f:
        header = f.read(16)
    if len(header) != 16:
        return
    # The full path (not basename()) is kept so that several files all named
    # wrapped-passphrase remain clearly distinguishable in the output.
    sys.stdout.write("%s:$ecryptfs$0$%s\n" % (filename, header))
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <wrapped-passphrase file(s)>\n" % \
sys.argv[0])
sys.stderr.write("\nExample: %s ~/.ecryptfs/wrapped-passphrase\n" % \
sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
|
UTF-8
|
Python
| false | false | 2,014 |
1,142,461,321,962 |
9dd6cd96941da5fcb8f5917b3def59157a6e391f
|
7cd8a355e814ad6e358b265ed41b14f409bfc2e1
|
/forum/forms.py
|
e76b83a78f1ecd2ac34d3f72d742cdd30d454b55
|
[] |
no_license
|
ericpriceintel/forum_project
|
https://github.com/ericpriceintel/forum_project
|
81b69ad3b381418e758096d96880737db4210849
|
6bd1d802f6d5c9d53ad28487553e611f567d98f3
|
refs/heads/master
| 2016-08-07T23:32:46.128562 | 2014-05-19T02:36:27 | 2014-05-19T02:36:27 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from django.utils import timezone
from models import Post, Comment
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
class PostTopicForm(forms.ModelForm):
    """Form for submitting a new link post (title + URL)."""
    title = forms.CharField()
    # Pre-fill the scheme so users can type a bare host/path.
    url = forms.URLField(initial="http://")
    class Meta:
        model = Post
        fields = ('title', 'url')
class RegistrationForm(forms.ModelForm):
    """New-user signup form backed by the auth User model."""
    # Masked input widget for the password.
    # NOTE(review): a plain ModelForm.save() would store the raw password;
    # presumably the view hashes it (set_password) before saving -- confirm.
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'password', 'email')
class LoginForm(forms.Form):
    """Username/password login form validated against Django's auth backend."""
    username = forms.CharField(max_length=200)
    password = forms.CharField(widget=forms.PasswordInput())
    def _authenticate(self):
        # Shared credential check used by both clean() and login().
        return authenticate(
            username=self.cleaned_data.get('username'),
            password=self.cleaned_data.get('password'))
    def clean(self):
        """Reject the form when the credentials are invalid or disabled."""
        user = self._authenticate()
        if not user:
            raise forms.ValidationError("Sorry, that login was invalid, please try again.")
        if not user.is_active:
            raise forms.ValidationError("Sorry, your account has been disabled.")
        return self.cleaned_data
    def login(self, request):
        """Return the authenticated user for the submitted credentials."""
        return self._authenticate()
class CommentForm(forms.ModelForm):
    """Form for adding a comment; the label is suppressed for layout."""
    comment = forms.CharField(widget=forms.Textarea, label='')
    class Meta:
        model = Comment
        fields = ('comment', )
|
UTF-8
|
Python
| false | false | 2,014 |
18,451,179,514,346 |
a5a8c397712059fcde75ef9e914d9eef256da6c9
|
75aa8fc83d6c13c3d015edfac7e51e687bdc177a
|
/StringModifying.py
|
daad4bb9beb2c0f759615cb28ad1c221d04ca7c4
|
[] |
no_license
|
Zhanglingit/py-smalltools
|
https://github.com/Zhanglingit/py-smalltools
|
decd7f756f9105f52bb6f3c6433823f3e96c5af0
|
cc5d862879ea05542e0aa8ac4a47fd85b12d2a09
|
refs/heads/master
| 2020-12-30T14:56:45.525544 | 2014-06-03T18:31:10 | 2014-06-03T18:31:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
#coding=utf-8
import re,wx
class mFrame(wx.Frame):
    """Main window of a small regex find/wrap/replace utility.

    Layout (labels are Chinese): an input and an output text box on top,
    then four fields -- target (regex) string, replacement, prefix, suffix --
    plus buttons to run the transform ("go"), copy the result back to the
    input/clipboard, and clear the input.
    """
    def __init__(self):
        wx.Frame.__init__(self, None, -1, u'字符串处理', size=(600,520))
        panel = wx.Panel(self, -1)
        # "输入:" = input, "输出:" = output.
        wx.StaticText(panel,-1,u"输入:",pos = (10, 10),size = (40, -1))
        wx.StaticText(panel,-1,u"输出:",pos = (300, 10),size = (40, -1))
        self.t1 = wx.TextCtrl(panel, -1, "",style=wx.TE_MULTILINE|wx.HSCROLL,size=(250,250),pos=(10,40))
        self.t2 = wx.TextCtrl(panel, -1, "",style=wx.TE_MULTILINE|wx.HSCROLL,size=(250,250),pos=(300,40))
        # "目标字符串:" = target string (treated as a regex in OnClick).
        wx.StaticText(panel,-1,u"目标字符串:",pos = (10, 300),size = (80, -1))
        self.message1 = wx.TextCtrl(panel,-1,"",pos = (90, 300),size = (200, -1))
        # "替换为:" = replace with; the literal default "self" means
        # "keep the matched text unchanged" (see OnClick).
        wx.StaticText(panel,-1,u"替换为:",pos = (10, 340),size = (80, -1))
        self.message2 = wx.TextCtrl(panel,-1,"self",pos = (90, 340),size = (200, -1))
        # "在前面添加:" = text inserted before each match.
        wx.StaticText(panel,-1,u"在前面添加:",pos = (10, 380),size = (80, -1))
        self.message3 = wx.TextCtrl(panel,-1,"",pos = (90, 380),size = (200, -1))
        # "在后面添加:" = text inserted after each match.
        wx.StaticText(panel,-1,u"在后面添加:",pos = (10, 420),size = (80, -1))
        self.message4 = wx.TextCtrl(panel,-1,"",pos = (90, 420),size = (200, -1))
        self.b1 = wx.Button(panel,-1,"go",pos = (400,340),size = (80,80))
        self.Bind(wx.EVT_BUTTON, self.OnClick, self.b1)
        self.b1.SetDefault()
        self.b2 = wx.Button(panel,-1,u"←",pos = (350,300),size = (80,30))
        self.Bind(wx.EVT_BUTTON, self.OnClick1, self.b2)
        self.b3 = wx.Button(panel,-1,u"→",pos = (450,300),size = (80,30))
        self.Bind(wx.EVT_BUTTON, self.OnDoCopy, self.b3)
        # NOTE(review): self.b3 is rebound here, so the clipboard button is no
        # longer reachable through an attribute (its event binding survives).
        self.b3 = wx.Button(panel,-1,"C",pos = (50,10),size = (30,18))
        self.Bind(wx.EVT_BUTTON, self.clear, self.b3)
        # Last transform result, shared by the copy handlers below.
        self.strm = ""
    def OnClick(self, event):
        # Run the transform: for each regex match of message1 in t1's text,
        # wrap it in message3/message4 and -- unless message2 is the literal
        # "self" -- replace the match with message2; show the result in t2.
        str = self.t1.GetValue()  # NOTE(review): shadows builtin str
        if str == "":
            return
        str1 = self.message1.GetValue()
        if str1 == "":
            return
        str2 = self.message2.GetValue()
        str3 = self.message3.GetValue()
        str4 = self.message4.GetValue()
        strr = ""
        strre = re.compile(str1.encode("utf-8"))
        list = strre.findall(str)  # NOTE(review): shadows builtin list
        # Allow literal "\n" / "\t" escapes typed into the edit fields.
        str2 = str2.replace(u"\\n",u"\n")
        str3 = str3.replace(u"\\n",u"\n")
        str4 = str4.replace(u"\\n",u"\n")
        str2 = str2.replace(u"\\t",u"\t")
        str3 = str3.replace(u"\\t",u"\t")
        str4 = str4.replace(u"\\t",u"\t")
        for x in list:
            # Emit everything before the next occurrence, then the (possibly
            # replaced) match wrapped in prefix/suffix, and continue after it.
            qd = str.find(x)
            hd = str.find(x)+len(x)
            if str2 == "self":
                strr = strr+str[:qd]+str3+x+str4
            else:
                strr = strr+str[:qd]+str3+str2+str4
            str = str[hd:]
        strr = strr+str
        self.strm = strr
        self.t2.SetLabel(self.strm)
    def OnClick1(self, event):
        # "←": copy the last result back into the input box.
        self.t1.SetLabel(self.strm)
    def OnDoCopy(self, event):
        # "→": place the last result on the system clipboard.
        data = wx.TextDataObject()
        data.SetText(self.strm)
        if wx.TheClipboard.Open():
            wx.TheClipboard.SetData(data)
            wx.TheClipboard.Close()
        else:
            wx.MessageBox("Unable to open the clipboard", "Error")
    def clear(self,event):
        # "C": clear the input box.
        self.t1.SetLabel("")
# Standard (Python-2-era) wxPython bootstrap: create the app object, show the
# main frame, and hand control to the GUI event loop.
app = wx.PySimpleApp()
frm = mFrame()
frm.Show()
app.MainLoop()
|
UTF-8
|
Python
| false | false | 2,014 |
9,921,374,473,935 |
6ccec73a323dc11e2cdbe7cad45f20f8a68687f4
|
b8d13766b66a64338a35f6a2b4c20b6c2c088224
|
/rubik/visualizer/impl/base_class_impl.py
|
42ebccddbf7dd155a38ae21a81900ceef739284d
|
[
"Apache-2.0"
] |
permissive
|
johan--/rubik
|
https://github.com/johan--/rubik
|
1723fc3492139bf94fcc050a27c4b2ff80ef0ffe
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
refs/heads/master
| 2021-01-18T11:41:32.189032 | 2014-09-25T09:58:53 | 2014-09-25T09:58:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'BaseClassImpl',
]
import weakref
from ... import conf
from traitsui.ui_info import UIInfo
class BaseClassImpl(object):
    """Shared base class for visualizer implementations.

    Hands out unique instance names from a class-wide counter, manages a
    one-shot pending window title, and keeps weak references to the
    (handler, info) pairs of every UI opened for the instance so they can
    be closed later without keeping them alive.
    """

    # One-shot title consumed by the next window that asks for it.
    WINDOW_TITLE = None
    # Monotonically increasing counter used to build unique names.
    CURRENT_ID = 0
    # Format applied to the counter when producing a name/title.
    ID_FORMAT = "{id}"

    def __init__(self, logger, title=None):
        """Store the logger, reserve a unique name, default the title to it."""
        self.logger = logger
        self.name = self.reserve_id()
        self.title = self.name if title is None else title
        self.handler_infos = []

    def add_handler_info(self, handler, info):
        """Remember a (handler, info) pair without keeping either alive."""
        pair = (weakref.ref(handler), weakref.ref(info))
        self.handler_infos.append(pair)

    @classmethod
    def reserve_id(cls):
        """Return the next unique id string and advance the counter."""
        reserved = cls.ID_FORMAT.format(id=cls.CURRENT_ID)
        cls.CURRENT_ID += 1
        return reserved

    @classmethod
    def set_window_title(cls, title=None):
        """Store *title* (or a title built from the current counter) as pending."""
        if title is None:
            title = cls.ID_FORMAT.format(id=cls.CURRENT_ID)
        cls.WINDOW_TITLE = title

    @classmethod
    def consume_window_title(cls):
        """Return the pending window title and clear it (one-shot semantics)."""
        pending, cls.WINDOW_TITLE = cls.WINDOW_TITLE, None
        return pending

    @classmethod
    def default_window_title(cls):
        """Return the pending title, generating a default one when unset."""
        if cls.WINDOW_TITLE is None:
            cls.set_window_title()
        return cls.consume_window_title()

    def has_trait(self, trait):
        """Tell whether *trait* is among this object's editable traits."""
        return trait in self.editable_traits()

    def log_trait_change(self, traits):
        """Log which traits changed on this instance."""
        self.logger.info("{}: changed traits: {}".format(self.name, traits))

    def close_uis(self, finish=True, close=True):
        """Close every still-alive UI registered via add_handler_info.

        Windows are closed in reverse registration order; pairs whose weak
        references have died are skipped silently.
        """
        self.logger.info("{}: closing windows".format(self.name))
        for handler_ref, info_ref in reversed(self.handler_infos):
            handler, info = handler_ref(), info_ref()
            if handler is None or info is None:
                # Already garbage-collected; nothing to close.
                continue
            self.logger.info("{}: closing handler {}, info {}".format(self.name, handler, info))
            if finish and info.ui:
                info.ui.finish()
            if close:
                handler.close(info, True)
|
UTF-8
|
Python
| false | false | 2,014 |
17,660,905,555,882 |
724b5a5d8a98799cdfb56422295309b503665b21
|
bba5a9b2b57fb0aa0870a03c4adf9845ae28e205
|
/lib/data_stores/http_data_store_test.py
|
3b8862bca9573d192830534506cc064bf069a30e
|
[
"Apache-2.0"
] |
permissive
|
RyPeck/grr
|
https://github.com/RyPeck/grr
|
710bff08e4c710280d3742c43c10f67af774fc5d
|
44d42c2ab4a1feca45b4cbdb01cebb8a2c543884
|
refs/heads/master
| 2020-04-05T18:31:21.949852 | 2014-09-16T18:34:15 | 2014-09-16T18:34:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""Tests the HTTP remote data store abstraction."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import http_data_store
class HTTPDataStoreMixin(object):
    """Mixin that points the test suite at a local HTTP data store.

    Configures test credentials and a two-server list, then installs an
    HTTPDataStore as the global data_store.DB used by the shared tests.
    """

    def InitDatastore(self):
        # Test credentials and the dataserver endpoints the store talks to.
        config_lib.CONFIG.Set("HTTPDataStore.username", "testuser")
        config_lib.CONFIG.Set("HTTPDataStore.password", "testpass")
        config_lib.CONFIG.Set("Dataserver.server_list", ["http://127.0.0.1:7000",
                                                         "http://127.0.0.1:7001"])
        data_store.DB = http_data_store.HTTPDataStore()

    def DestroyDatastore(self):
        # Nothing to tear down: the data lives in the external dataservers.
        pass
# Reuse the generic data-store test suite against the HTTP backend.
class HTTPDataStoreTest(HTTPDataStoreMixin,
                        data_store_test.DataStoreTest):
    """Test the remote data store."""
# Reuse the generic benchmark suite against the HTTP backend.
class HTTPDataStoreBenchmarks(HTTPDataStoreMixin,
                              data_store_test.DataStoreBenchmarks):
    """Benchmark the HTTP remote data store abstraction."""
# CSV-emitting variant of the benchmark suite for the HTTP backend.
class HTTPDataStoreCSVBenchmarks(HTTPDataStoreMixin,
                                 data_store_test.DataStoreCSVBenchmarks):
    """Benchmark the HTTP remote data store."""
def main(args):
    """Entry point: delegate to the GRR test runner."""
    test_lib.main(args)

if __name__ == "__main__":
    flags.StartMain(main)
|
UTF-8
|
Python
| false | false | 2,014 |
575,525,643,305 |
121a62950e76bf65f8875e99fdd62c34d5eb2c8d
|
f000c040ce31532abf57f103968d9d88c51ab274
|
/SpiffWorkflow/specs/Execute.py
|
f3fb295fa8ee7bcdd52df925df86c363345185f8
|
[
"LGPL-3.0-only"
] |
non_permissive
|
matthewhampton/SpiffWorkflow
|
https://github.com/matthewhampton/SpiffWorkflow
|
9b1c3d18b64aa2cdacdde55206c21a3de4d60053
|
903f77b2cc12803ca40dfc2b8db3652a770881a9
|
refs/heads/master
| 2021-01-24T04:34:55.722530 | 2013-08-07T12:29:16 | 2013-08-14T13:33:12 | 5,495,987 | 6 | 5 |
LGPL-3.0
| true | 2019-07-25T13:56:22 | 2012-08-21T14:14:02 | 2019-01-23T11:43:12 | 2019-05-23T07:29:54 | 11,135 | 12 | 5 | 1 |
Python
| false | false |
# Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import subprocess
from SpiffWorkflow.Task import Task
from SpiffWorkflow.exceptions import WorkflowException
from SpiffWorkflow.specs.TaskSpec import TaskSpec
class Execute(TaskSpec):
    """
    This class executes an external process, goes into WAITING until the
    process is complete, and returns the results of the execution.

    Usage:

    task = Execute(spec, 'Ping', args=["ping", "-t", "1", "127.0.0.1"])
        ... when workflow complete
    print workflow.get_task('Ping').results
    """

    def __init__(self, parent, name, args=None, **kwargs):
        """
        Constructor.

        :type  parent: TaskSpec
        :param parent: A reference to the parent task spec.
        :type  name: str
        :param name: The name of the task spec.
        :type  args: list
        :param args: args to pass to process (first arg is the command).
        :type  kwargs: dict
        :param kwargs: kwargs to pass-through to TaskSpec initializer.
        """
        assert parent is not None
        assert name is not None
        TaskSpec.__init__(self, parent, name, **kwargs)
        self.args = args

    def _try_fire(self, my_task, force = False):
        """Launch the subprocess on first call, then poll it.

        Returns True once the process has finished (its (stdout, stderr)
        tuple is stored on ``my_task.results``); returns False while it is
        still running.  NOTE(review): the previous docstring stated the
        opposite ("False when successfully fired") — the code below has
        always returned True on completion.
        """
        # Lazily start the process the first time this task is polled.
        if (not hasattr(my_task, 'subprocess')) or my_task.subprocess is None:
            my_task.subprocess = subprocess.Popen(self.args,
                                                  stderr=subprocess.STDOUT,
                                                  stdout=subprocess.PIPE)
        if my_task.subprocess:
            my_task.subprocess.poll()
            if my_task.subprocess.returncode is None:
                # Still waiting
                return False
            else:
                # Process finished; capture its combined output.
                results = my_task.subprocess.communicate()
                my_task.results = results
                return True
        return False

    def _update_state_hook(self, my_task):
        # Keep the task in WAITING until the subprocess completes, then
        # fall through to the normal state update.
        if not self._try_fire(my_task):
            my_task.state = Task.WAITING
            return
        super(Execute, self)._update_state_hook(my_task)

    def serialize(self, serializer):
        return serializer._serialize_execute(self)

    @classmethod
    def deserialize(self, serializer, wf_spec, s_state):
        # NOTE(review): decorated with @classmethod, so the first
        # parameter (named 'self') is actually the class object.
        spec = serializer._deserialize_execute(wf_spec, s_state)
        return spec
|
UTF-8
|
Python
| false | false | 2,013 |
17,093,969,848,265 |
c0bff32269c889e5a6870402ecaf8558788c3487
|
e4c3655e3e114da9860b258993504f888a752048
|
/dynd/tests/test_numpy_interop.py
|
73a542158296a057fef56153b4eda3a914ec4efe
|
[
"BSD-2-Clause"
] |
permissive
|
fivejjs/dynd-python
|
https://github.com/fivejjs/dynd-python
|
2131ff94dc0795757eaa8e548f00b9fb1aa5a248
|
ad7d9aaa9bcf5a80dcf0c6858d2da4b14539a376
|
refs/heads/master
| 2017-05-02T23:09:50.173562 | 2013-06-27T04:39:07 | 2013-06-27T04:39:07 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import unittest
from dynd import nd, ndt
import numpy as np
from datetime import date
from numpy.testing import *
class TestNumpyDTypeInterop(unittest.TestCase):
    """Conversions from numpy scalar types and dtypes to dynd dtypes."""

    def setUp(self):
        # Byte-order character that is NOT native on this machine, used to
        # build byteswapped numpy dtypes below.
        if sys.byteorder == 'little':
            self.nonnative = '>'
        else:
            self.nonnative = '<'

    def test_dtype_from_numpy_scalar_types(self):
        # Tests converting numpy scalar types to pydynd dtypes
        self.assertEqual(ndt.bool, nd.dtype(np.bool))
        self.assertEqual(ndt.bool, nd.dtype(np.bool_))
        self.assertEqual(ndt.int8, nd.dtype(np.int8))
        self.assertEqual(ndt.int16, nd.dtype(np.int16))
        self.assertEqual(ndt.int32, nd.dtype(np.int32))
        self.assertEqual(ndt.int64, nd.dtype(np.int64))
        self.assertEqual(ndt.uint8, nd.dtype(np.uint8))
        self.assertEqual(ndt.uint16, nd.dtype(np.uint16))
        self.assertEqual(ndt.uint32, nd.dtype(np.uint32))
        self.assertEqual(ndt.uint64, nd.dtype(np.uint64))
        self.assertEqual(ndt.float32, nd.dtype(np.float32))
        self.assertEqual(ndt.float64, nd.dtype(np.float64))
        self.assertEqual(ndt.cfloat32, nd.dtype(np.complex64))
        self.assertEqual(ndt.cfloat64, nd.dtype(np.complex128))

    def test_dtype_from_numpy_dtype(self):
        # Tests converting numpy dtypes to pydynd dtypes
        # native byte order
        self.assertEqual(ndt.bool, nd.dtype(np.dtype(np.bool)))
        self.assertEqual(ndt.int8, nd.dtype(np.dtype(np.int8)))
        self.assertEqual(ndt.int16, nd.dtype(np.dtype(np.int16)))
        self.assertEqual(ndt.int32, nd.dtype(np.dtype(np.int32)))
        self.assertEqual(ndt.int64, nd.dtype(np.dtype(np.int64)))
        self.assertEqual(ndt.uint8, nd.dtype(np.dtype(np.uint8)))
        self.assertEqual(ndt.uint16, nd.dtype(np.dtype(np.uint16)))
        self.assertEqual(ndt.uint32, nd.dtype(np.dtype(np.uint32)))
        self.assertEqual(ndt.uint64, nd.dtype(np.dtype(np.uint64)))
        self.assertEqual(ndt.float32, nd.dtype(np.dtype(np.float32)))
        self.assertEqual(ndt.float64, nd.dtype(np.dtype(np.float64)))
        self.assertEqual(ndt.cfloat32, nd.dtype(np.dtype(np.complex64)))
        self.assertEqual(ndt.cfloat64, nd.dtype(np.dtype(np.complex128)))
        # Fixed-size strings map to dynd fixedstring dtypes.
        self.assertEqual(ndt.make_fixedstring_dtype(10, 'ascii'),
                         nd.dtype(np.dtype('S10')))
        self.assertEqual(ndt.make_fixedstring_dtype(10, 'utf_32'),
                         nd.dtype(np.dtype('U10')))
        # non-native byte order
        nonnative = self.nonnative
        self.assertEqual(ndt.make_byteswap_dtype(ndt.int16),
                         nd.dtype(np.dtype(nonnative + 'i2')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.int32),
                         nd.dtype(np.dtype(nonnative + 'i4')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.int64),
                         nd.dtype(np.dtype(nonnative + 'i8')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.uint16),
                         nd.dtype(np.dtype(nonnative + 'u2')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.uint32),
                         nd.dtype(np.dtype(nonnative + 'u4')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.uint64),
                         nd.dtype(np.dtype(nonnative + 'u8')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.float32),
                         nd.dtype(np.dtype(nonnative + 'f4')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.float64),
                         nd.dtype(np.dtype(nonnative + 'f8')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.cfloat32),
                         nd.dtype(np.dtype(nonnative + 'c8')))
        self.assertEqual(ndt.make_byteswap_dtype(ndt.cfloat64),
                         nd.dtype(np.dtype(nonnative + 'c16')))
class TestNumpyViewInterop(unittest.TestCase):
    """Zero-copy views and data sharing between numpy arrays and dynd arrays."""

    def setUp(self):
        # Byte-order character that is NOT native on this machine.
        if sys.byteorder == 'little':
            self.nonnative = '>'
        else:
            self.nonnative = '<'

    def test_dynd_view_of_numpy_array(self):
        # Tests viewing a numpy array as a dynd.array
        nonnative = self.nonnative
        # Aligned, native byte order.
        a = np.arange(10, dtype=np.int32)
        n = nd.array(a)
        self.assertEqual(n.udtype, ndt.int32)
        self.assertEqual(n.undim, a.ndim)
        self.assertEqual(n.shape, a.shape)
        self.assertEqual(n.strides, a.strides)
        # Aligned, non-native byte order -> byteswap dtype.
        a = np.arange(12, dtype=(nonnative + 'i4')).reshape(3,4)
        n = nd.array(a)
        self.assertEqual(n.udtype, ndt.make_byteswap_dtype(ndt.int32))
        self.assertEqual(n.undim, a.ndim)
        self.assertEqual(n.shape, a.shape)
        self.assertEqual(n.strides, a.strides)
        # Unaligned view (offset by one byte) -> unaligned dtype.
        a = np.arange(49, dtype='i1')
        a = a[1:].view(dtype=np.int32).reshape(4,3)
        n = nd.array(a)
        self.assertEqual(n.udtype, ndt.make_unaligned_dtype(ndt.int32))
        self.assertEqual(n.undim, a.ndim)
        self.assertEqual(n.shape, a.shape)
        self.assertEqual(n.strides, a.strides)
        # Unaligned AND byteswapped.
        a = np.arange(49, dtype='i1')
        a = a[1:].view(dtype=(nonnative + 'i4')).reshape(2,2,3)
        n = nd.array(a)
        self.assertEqual(n.udtype,
                         ndt.make_unaligned_dtype(ndt.make_byteswap_dtype(ndt.int32)))
        self.assertEqual(n.undim, a.ndim)
        self.assertEqual(n.shape, a.shape)
        self.assertEqual(n.strides, a.strides)

    def test_numpy_view_of_dynd_array(self):
        # Tests viewing a dynd.array as a numpy array
        nonnative = self.nonnative
        # Aligned, native byte order.
        n = nd.array(np.arange(10, dtype=np.int32))
        a = np.asarray(n)
        self.assertEqual(a.dtype, np.dtype(np.int32))
        self.assertTrue(a.flags.aligned)
        self.assertEqual(a.ndim, n.undim)
        self.assertEqual(a.shape, n.shape)
        self.assertEqual(a.strides, n.strides)
        # Aligned, non-native byte order round-trips the dtype.
        n = nd.array(np.arange(12, dtype=(nonnative + 'i4')).reshape(3,4))
        a = np.asarray(n)
        self.assertEqual(a.dtype, np.dtype(nonnative + 'i4'))
        self.assertTrue(a.flags.aligned)
        self.assertEqual(a.ndim, n.undim)
        self.assertEqual(a.shape, n.shape)
        self.assertEqual(a.strides, n.strides)
        # Unaligned data comes back with the aligned flag cleared.
        n = nd.array(np.arange(49, dtype='i1')[1:].view(dtype=np.int32).reshape(4,3))
        a = np.asarray(n)
        self.assertEqual(a.dtype, np.dtype(np.int32))
        self.assertFalse(a.flags.aligned)
        self.assertEqual(a.ndim, n.undim)
        self.assertEqual(a.shape, n.shape)
        self.assertEqual(a.strides, n.strides)
        # Unaligned AND byteswapped.
        n = nd.array(np.arange(49, dtype='i1')[1:].view(
            dtype=(nonnative + 'i4')).reshape(2,2,3))
        a = np.asarray(n)
        self.assertEqual(a.dtype, np.dtype(nonnative + 'i4'))
        self.assertFalse(a.flags.aligned)
        self.assertEqual(a.ndim, n.undim)
        self.assertEqual(a.shape, n.shape)
        self.assertEqual(a.strides, n.strides)

    def test_numpy_dynd_fixedstring_interop(self):
        # Tests converting fixed-size string arrays to/from numpy
        # ASCII Numpy -> dynd
        a = np.array(['abc', 'testing', 'array'])
        b = nd.array(a)
        # On Python 3 numpy produces unicode ('U') arrays by default.
        if sys.version_info >= (3, 0):
            self.assertEqual(ndt.make_fixedstring_dtype(7, 'utf_32'), b.udtype)
        else:
            self.assertEqual(ndt.make_fixedstring_dtype(7, 'ascii'), b.udtype)
        self.assertEqual(b.udtype, nd.dtype(a.dtype))
        # Make sure it's ascii
        a = a.astype('S7')
        b = nd.array(a)
        # ASCII dynd -> Numpy
        c = np.asarray(b)
        self.assertEqual(a.dtype, c.dtype)
        assert_array_equal(a, c)
        # verify 'a' and 'c' are looking at the same data
        a[1] = 'modify'
        assert_array_equal(a, c)
        # ASCII dynd -> UTF32 dynd
        b_u = b.ucast(ndt.make_fixedstring_dtype(7, 'utf_32'))
        self.assertEqual(
            ndt.make_convert_dtype(
                ndt.make_fixedstring_dtype(7, 'utf_32'),
                ndt.make_fixedstring_dtype(7, 'ascii')),
            b_u.udtype)
        # Evaluate to its value array
        b_u = b_u.eval()
        self.assertEqual(
            ndt.make_fixedstring_dtype(7, 'utf_32'),
            b_u.udtype)
        # UTF32 dynd -> Numpy
        c_u = np.asarray(b_u)
        self.assertEqual(b_u.udtype, nd.dtype(c_u.dtype))
        assert_array_equal(a.astype('U'), c_u)
        # 'a' and 'c_u' are not looking at the same data
        a[1] = 'diff'
        self.assertFalse(np.all(a == c_u))

    def test_numpy_blockref_string(self):
        # Blockref strings don't have a corresponding Numpy construct
        # Therefore numpy makes an object array scalar out of them.
        a = nd.array("abcdef")
        if sys.version_info >= (3, 0):
            self.assertEqual(a.dtype, ndt.string)
        else:
            self.assertEqual(a.dtype, ndt.make_string_dtype('ascii'))
        # Some versions of NumPy produce an error instead,
        # so this assertion is removed
        #self.assertEqual(np.asarray(a).dtype, np.dtype(object))
        a = nd.array(u"abcdef \uc548\ub155")
        self.assertEqual(a.dtype, ndt.string)
        # Some versions of NumPy produce an error instead,
        # so this assertion is removed
        #self.assertEqual(np.asarray(a).dtype, np.dtype(object))

    def test_readwrite_access_flags(self):
        # Tests that read/write access control is preserved to/from numpy
        a = np.arange(10.)
        # Writeable
        b = nd.array(a)
        b[0] = 2.0
        self.assertEqual(nd.as_py(b[0]), 2.0)
        self.assertEqual(a[0], 2.0)
        # Not writeable
        a.flags.writeable = False
        b = nd.array(a)
        def assign_to(x,y):
            x[0] = y
        self.assertRaises(RuntimeError, assign_to, b, 3.0)
        # should still be 2.0
        self.assertEqual(nd.as_py(b[0]), 2.0)
        self.assertEqual(a[0], 2.0)
class TestNumpyScalarInterop(unittest.TestCase):
    """Conversion of individual numpy scalars to dynd arrays and back."""

    def test_numpy_scalar_conversion_dtypes(self):
        self.assertEqual(nd.array(np.bool_(True)).dtype, ndt.bool)
        self.assertEqual(nd.array(np.bool(True)).dtype, ndt.bool)
        self.assertEqual(nd.array(np.int8(100)).dtype, ndt.int8)
        self.assertEqual(nd.array(np.int16(100)).dtype, ndt.int16)
        self.assertEqual(nd.array(np.int32(100)).dtype, ndt.int32)
        self.assertEqual(nd.array(np.int64(100)).dtype, ndt.int64)
        self.assertEqual(nd.array(np.uint8(100)).dtype, ndt.uint8)
        self.assertEqual(nd.array(np.uint16(100)).dtype, ndt.uint16)
        self.assertEqual(nd.array(np.uint32(100)).dtype, ndt.uint32)
        self.assertEqual(nd.array(np.uint64(100)).dtype, ndt.uint64)
        self.assertEqual(nd.array(np.float32(100.)).dtype, ndt.float32)
        self.assertEqual(nd.array(np.float64(100.)).dtype, ndt.float64)
        self.assertEqual(nd.array(np.complex64(100j)).dtype, ndt.cfloat32)
        self.assertEqual(nd.array(np.complex128(100j)).dtype, ndt.cfloat64)
        # NOTE(review): lexicographic string comparison of versions is
        # fragile (e.g. '1.10' < '1.7') — consider a proper version parse.
        if np.__version__ >= '1.7':
            self.assertEqual(nd.array(np.datetime64('2000-12-13')).dtype, ndt.date)

    def test_numpy_scalar_conversion_values(self):
        self.assertEqual(nd.as_py(nd.array(np.bool_(True))), True)
        self.assertEqual(nd.as_py(nd.array(np.bool_(False))), False)
        self.assertEqual(nd.as_py(nd.array(np.int8(100))), 100)
        self.assertEqual(nd.as_py(nd.array(np.int8(-100))), -100)
        self.assertEqual(nd.as_py(nd.array(np.int16(20000))), 20000)
        self.assertEqual(nd.as_py(nd.array(np.int16(-20000))), -20000)
        self.assertEqual(nd.as_py(nd.array(np.int32(1000000000))), 1000000000)
        self.assertEqual(nd.as_py(nd.array(np.int64(-1000000000000))), -1000000000000)
        self.assertEqual(nd.as_py(nd.array(np.int64(1000000000000))), 1000000000000)
        self.assertEqual(nd.as_py(nd.array(np.int32(-1000000000))), -1000000000)
        self.assertEqual(nd.as_py(nd.array(np.uint8(200))), 200)
        self.assertEqual(nd.as_py(nd.array(np.uint16(50000))), 50000)
        self.assertEqual(nd.as_py(nd.array(np.uint32(3000000000))), 3000000000)
        self.assertEqual(nd.as_py(nd.array(np.uint64(10000000000000000000))), 10000000000000000000)
        self.assertEqual(nd.as_py(nd.array(np.float32(2.5))), 2.5)
        self.assertEqual(nd.as_py(nd.array(np.float64(2.5))), 2.5)
        self.assertEqual(nd.as_py(nd.array(np.complex64(2.5-1j))), 2.5-1j)
        self.assertEqual(nd.as_py(nd.array(np.complex128(2.5-1j))), 2.5-1j)
        # NOTE(review): same fragile string version comparison as above.
        if np.__version__ >= '1.7':
            self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13'))), date(2000, 12, 13))

    def test_expr_struct_conversion(self):
        a = nd.array([date(2000, 12, 13), date(1995, 5, 2)]).to_struct()
        b = nd.as_numpy(a, allow_copy=True)
        # Use the NumPy assertions which support arrays
        assert_equal(b['year'], [2000, 1995])
        assert_equal(b['month'], [12, 5])
        assert_equal(b['day'], [13, 2])
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
UTF-8
|
Python
| false | false | 2,013 |
10,161,892,634,761 |
5f83ca56dc3c7a1bf301d6898f2c4031951d4761
|
b3e42461d26a3c458f93a4d8408d460176177e37
|
/src/push_anchor_file.py
|
41ec525e639e739c3bca268bc0799f769bf33912
|
[] |
no_license
|
mchrzanowski/US_Twitter_Vote_Prediction
|
https://github.com/mchrzanowski/US_Twitter_Vote_Prediction
|
8f54a48a54ce03b371e3bcb8ca91529d46eae6e9
|
c5a4d7c435d0c35d8584de86decad9356ee35341
|
refs/heads/master
| 2020-05-23T14:03:52.483641 | 2013-08-24T05:53:12 | 2013-08-24T05:53:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import constants
import csv
import MySQLdb
import time
def run(filepath):
    """Replace the contents of the ``anchors`` table with the CSV at *filepath*.

    Reads rows with columns 'Industry', 'Twitter Account' and
    'Endorses which candidate', inserts any new industries, then inserts
    one anchor row per account by joining against the existing
    ``accounts`` and ``industries`` tables.
    """
    db = MySQLdb.connect(user=constants.mysql_user, passwd=constants.mysql_password,
                         db=constants.mysql_database, charset=constants.mysql_charset)
    # Commit each statement immediately; no explicit transaction handling.
    db.autocommit(True)
    cursor = db.cursor()
    # Start from a clean slate: anchors are fully rebuilt from the CSV.
    cursor.execute(""" delete from anchors """)
    delayed_anchor_insertions = set()
    delayed_industry_insertions = set()
    fh = csv.DictReader(open(filepath, 'rb'), delimiter=',')
    for row in fh:
        delayed_industry_insertions.add(row['Industry'].lower())
        # Normalize handles: strip the leading '@' and lowercase.
        row['Twitter Account'] = row['Twitter Account'].lstrip('@').lower()
        # Tuple order matches the (%s, like %s, like %s) placeholders below.
        delayed_anchor_insertions.add((row['Endorses which candidate'], row['Twitter Account'], row['Industry']))
    cursor.executemany(""" insert ignore into industries (industry)
        values (%s) """, delayed_industry_insertions)
    cursor.executemany(""" insert ignore into anchors (account_id, vote, industry_id)
        select a.account_id, %s, i.industry_id from accounts a, industries i
        where a.account_name like %s and i.industry like %s """, delayed_anchor_insertions)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description="Push raw anchor csv file into database")
    parser.add_argument('-file', type=str, required=True,
                        help='Provide a path to file to push.')
    args = vars(parser.parse_args())
    # Time the import so long runs are visible.
    start = time.time()
    run(args['file'])
    end = time.time()
    # Python 2 print statement.
    print "Runtime: %f seconds" % (end - start)
|
UTF-8
|
Python
| false | false | 2,013 |
9,646,496,578,748 |
cc7431b9649ee0715ec58af2600dc09afea6371d
|
071aacb6b9039352af608a2418c9dbc94f728e93
|
/boring/models.py
|
0a77d7f644238dda6cfffbc5390f4fad260cddd6
|
[] |
no_license
|
slinkp/pygotham_hamage_demo
|
https://github.com/slinkp/pygotham_hamage_demo
|
1b0883909f4efa2769af46e7b4665e3db13162d2
|
0267be35cd29adcf46d5fe014482752960daa309
|
refs/heads/master
| 2021-01-23T07:03:08.764377 | 2012-06-14T19:06:43 | 2012-06-14T19:06:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib.comments.moderation import CommentModerator, moderator
# Create your models here.
class Entry(models.Model):
    """Blog-ish demo content. Yes, boring."""
    # Free-text author name rather than a FK to auth.User (demo app).
    author = models.CharField(max_length=256)
    title = models.CharField(max_length=256)
    body = models.TextField()
class EntryModerator(CommentModerator):
    """Wire up django.contrib.comments moderation
    to a particular model.

    This is not a particularly good idea, because if allow() returns False
    (don't allow the object), Django gives the user a not very useful
    'security has been tampered with' message, and you apparently have
    no influence over what it says.
    """

    def allow(self, comment, content_object, request):
        # Run the comment through the hamage spam filter; allow it only
        # if the filter does not reject it.
        from hamage.backends.django_hamage.models import DjangoFilterSystem
        from hamage.backends.django_hamage.models import RejectContent
        filtersys = DjangoFilterSystem()
        # Build an author string: prefer the typed-in name, then the
        # authenticated user's name.
        if comment.user_name:
            author = comment.user_name
        elif comment.user:
            # NOTE(review): django auth User has no `name` attribute
            # (username / get_full_name()) — this branch likely raises
            # AttributeError; confirm.
            author = comment.user.name
        else:
            author = ''
        if comment.user_email:
            author = '%s <%s>' % (author, comment.user_email)
        author = author.strip()
        # Present the comment fields as (old, new) change pairs for the filter.
        # NOTE(review): this uses comment.url / comment.email while the
        # branch above uses comment.user_email — confirm these property
        # aliases exist on the Comment model in the targeted Django version.
        changes = [(u'', comment.comment),
                   (u'', comment.url),
                   (u'', comment.email),
                   ]
        try:
            filtersys.test(request, author, changes)
            return True
        except RejectContent:
            return False
#moderator.register(Entry, EntryModerator)
|
UTF-8
|
Python
| false | false | 2,012 |
19,524,921,341,857 |
088267ebf6ab1468c56ddeabd627d8cddf7dc312
|
05d55e0ad951f97c8786906c73c9ecbae087dbda
|
/deriving/derive_paragraphs.py
|
f7f2e58148a8bd18de44aaa16e1232d147b0877f
|
[] |
no_license
|
priidukull/yes_this_can_be
|
https://github.com/priidukull/yes_this_can_be
|
ce354a6cbf4c2575d20b9fa794ff35583b1d8767
|
a2126fe22771779c265073a973f522ab7d1ae46f
|
refs/heads/master
| 2020-04-27T12:18:07.866827 | 2014-11-30T20:36:34 | 2014-11-30T20:36:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import argparse
import logging
from app.models import statute_xml
from deriving.derive_points import DerivePoints
from deriving.derive_sections import DeriveSections
from helpers import statute_xml_helper
from app.models.paragraph import ParagraphRepo
class DeriveParagraphs():
    """Derives paragraph records from stored statute XML rows."""

    def __init__(self):
        self._paragraph_mdl = ParagraphRepo()
        self._statute_xml_helper = statute_xml_helper.Helper()
        self._statute_xml_mdl = statute_xml.StatuteXml()

    def derive_all(self, statute_ids=None):
        """Derive paragraphs for the given statutes (all statutes when None)."""
        logging.info("Started deriving paragraphs")
        rows = self._statute_xml_mdl.get_by_statute_ids(statute_ids=statute_ids)
        for row in rows:
            self._derive_one(row)
        logging.info("COMPLETED deriving paragraphs")

    def _derive_one(self, row):
        # Parse one statute XML row into paragraphs and persist them.
        paragraphs = self._statute_xml_helper.parse_paragraphs(row=row)
        self._paragraph_mdl.insert_many(paragraphs=paragraphs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Derives paragraphs and may be other features')
parser.add_argument('--dep', help='also derive depending features', dest='dep', action='store_true')
parser.add_argument('-id', metavar='statute_id', type=int, nargs='+', help='ids of the statutes for which paragraphs are to be derived')
args = parser.parse_args()
if hasattr(args, 'id') and args.id:
DeriveParagraphs().derive_all(statute_ids=args.id)
else:
DeriveParagraphs().derive_all()
if hasattr(args, 'dep') and args.dep:
DeriveSections().derive_all()
DerivePoints().derive_all()
|
UTF-8
|
Python
| false | false | 2,014 |
13,271,448,987,601 |
4e304111cabfbf738c7b9a3d7f8dbd4a42871695
|
68eff6b9f4f659debd100a239b7f612a19be7df2
|
/ovo/website/static_class.py
|
ec006c284c273159d5b36fb19e7c5040ba7889e8
|
[] |
no_license
|
Cybrotech/OVO
|
https://github.com/Cybrotech/OVO
|
b59880079121e2ee7d485c0ca504d416e28c1d8a
|
95c43502457146db8cdc327a58299b8616b64433
|
refs/heads/master
| 2021-01-10T19:28:27.883986 | 2014-09-23T11:13:56 | 2014-09-23T11:13:56 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class MyClass:
    """Accumulates websites added during a session.

    Fix: the original declared only the class-level ``added_websites = []``,
    so every instance shared one list until the first ``clear()`` call — the
    classic mutable-class-attribute pitfall.  ``__init__`` now gives each
    instance its own list; the class attribute is retained for backward
    compatibility with code reading it off the class directly.
    """
    # Class-level default kept for backward compatibility.
    added_websites = []

    def __init__(self):
        # Give every instance an independent list.
        self.clear()

    def clear(self):
        """Reset the accumulated websites for this instance."""
        self.added_websites = []
class MyNewClass:
    """Tracks videos added and deleted during a session.

    Fix: the original relied on class-level list attributes, so all
    instances shared the same ``added_videos``/``deleted_videos`` until the
    first ``clear()`` call (mutable-class-attribute pitfall).  ``__init__``
    now gives each instance its own lists; the class attributes are kept
    for backward compatibility.
    """
    # Class-level defaults kept for backward compatibility.
    added_videos = []
    deleted_videos = []

    def __init__(self):
        # Give every instance independent lists.
        self.clear()

    def clear(self):
        """Reset both per-instance lists."""
        self.added_videos = []
        self.deleted_videos = []
|
UTF-8
|
Python
| false | false | 2,014 |
15,375,982,968,317 |
e86354e18c3eb40d57c28bf7e5317c8319f59355
|
602712ebe5f45931c368463f03f6daec3fc14b30
|
/Principal/admin.py
|
c7efb42abb0fb4951f544975ff4ada7fa99df946
|
[
"MIT"
] |
permissive
|
camilortte/Recomendador
|
https://github.com/camilortte/Recomendador
|
89f994bea1c383dac0a7b5a597c11f16d4284eef
|
d708f7d0ea801753673d63972ec92df052621c3d
|
refs/heads/master
| 2016-09-05T15:22:49.432896 | 2013-11-12T16:52:04 | 2013-11-12T16:52:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from Principal.models import *
from django.contrib.auth.admin import UserAdmin
#from django.contrib.auth.models import User
#################################
from Principal.forms import UserChangeForm , UserCreationForm
class MyUserAdmin(UserAdmin):
    """Admin configuration for the custom email-based user model."""
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm
    # Columns and filters for the changelist page.
    list_display = ('email','nombre','apellido','cedula','is_active', 'is_staff','is_superuser','last_login',)
    list_filter = ('is_staff','last_login')
    # Field layout for the change form.
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        ('Personal info', {'fields': ('nombre','apellido','cedula','localidad')}),
        ('Permissions', {'fields': ('is_staff','is_active','is_superuser','groups',
                                    'user_permissions')}),
        ('Important dates', {'fields': ('date_joined','last_login',)}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'nombre', 'password1', 'password2')}
        ),
    )
    search_fields = ('email','nombre','apellido','cedula',)
    ordering = ('email',)
    filter_horizontal = ('groups', 'user_permissions',)
# Register the custom user model with its tailored admin class.
admin.site.register(Usuario, MyUserAdmin)
#admin.site.unregister(Group)
#admin.site.unregister(User)
#admin.site.register(User, UserAdmin)
# Remaining models use the default ModelAdmin.
admin.site.register(Localidad)
admin.site.register(Local)
admin.site.register(TipoLocal)
admin.site.register(Sugerencia)
|
UTF-8
|
Python
| false | false | 2,013 |
2,886,218,044,446 |
3fada932494d59bf5e233f8f0df14c59fe69a961
|
0006a00ea3ded70dc7017082f48ca7db8c6b95c9
|
/cordova/version.py
|
26878ab51dbc77c4838c15e2013fec87eeaf6091
|
[
"MIT"
] |
permissive
|
heynemann/Cordova
|
https://github.com/heynemann/Cordova
|
1eeb9f98819513d857bdd03b57968f463e4d6fdc
|
8a930f07fa3743c6c47f358c32490747815b2f95
|
refs/heads/master
| 2021-01-16T02:27:29.755699 | 2011-05-31T23:37:01 | 2011-05-31T23:37:01 | 1,733,219 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# having the version as a tuple makes some automation easier
__version__ = (0, 1, 0)

def format_version():
    """Return __version__ as a dotted string, e.g. "0.1.0"."""
    return ".".join(map(str, __version__))
|
UTF-8
|
Python
| false | false | 2,011 |
7,559,142,463,826 |
9c6000b77d4043c32d43d345d5b84bbbe64899b7
|
0ced115bde5daee794c6231cb1c294c477b48ade
|
/nova/virt/hyperv/vif.py
|
e01006eaa641f575f4d8a291021b006170dfc003
|
[
"Apache-2.0"
] |
permissive
|
maoy/zknova
|
https://github.com/maoy/zknova
|
70cb6fdc51593bd25d2c79618800fe51669d4366
|
8dd09199f5678697be228ffceeaf2c16f6d7319d
|
refs/heads/zk-servicegroup
| 2021-06-05T16:38:16.997462 | 2013-01-11T16:39:14 | 2013-01-29T20:31:53 | 4,551,252 | 0 | 1 |
Apache-2.0
| false | 2020-07-24T08:27:21 | 2012-06-04T19:15:15 | 2014-01-24T04:29:07 | 2013-01-29T20:32:14 | 111,305 | 2 | 1 | 1 |
Python
| false | false |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Cloudbase Solutions Srl
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import sys
import uuid
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import wmi
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import vmutils
# Configuration options exposed by this driver.
hyperv_opts = [
    cfg.StrOpt('vswitch_name',
               default=None,
               help='External virtual switch Name, '
                    'if not provided, the first external virtual '
                    'switch is used'),
]

CONF = cfg.CONF
CONF.register_opts(hyperv_opts)

LOG = logging.getLogger(__name__)
class HyperVBaseVIFDriver(object):
    """Interface for Hyper-V VIF drivers: plug/unplug a VIF for an instance.

    NOTE(review): @abc.abstractmethod has no enforcement effect here
    because the class does not use ABCMeta as its metaclass — confirm
    whether enforcement is intended.
    """

    @abc.abstractmethod
    def plug(self, instance, vif):
        pass

    @abc.abstractmethod
    def unplug(self, instance, vif):
        pass
class HyperVQuantumVIFDriver(HyperVBaseVIFDriver):
    """Quantum VIF driver."""

    def plug(self, instance, vif):
        # Quantum takes care of plugging the port
        pass

    def unplug(self, instance, vif):
        # Quantum takes care of unplugging the port
        pass
class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
    """Nova network VIF driver."""

    def __init__(self):
        self._vmutils = vmutils.VMUtils()
        self._conn = wmi.WMI(moniker='//./root/virtualization')

    def _find_external_network(self):
        """Find the vswitch that is connected to the physical nic.

        Assumes only one physical nic on the host.

        :returns: the Msvm_VirtualSwitch WMI object, or None when nothing
            is bound.
        """
        if CONF.vswitch_name:
            LOG.debug(_("Attempting to bind NIC to %s ")
                      % CONF.vswitch_name)
            bound = self._conn.Msvm_VirtualSwitch(
                ElementName=CONF.vswitch_name)
        else:
            LOG.debug(_("No vSwitch specified, attaching to default"))
            # Bug fix: the original discarded this query result, so
            # 'bound' was never assigned in this branch and the length
            # check below raised UnboundLocalError.
            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
        if len(bound) == 0:
            return None
        # Walk from the bound element to one of its switch ports and on
        # to the virtual switch that port belongs to.  (The original also
        # called .associators() on the query result list itself in the
        # default-switch case, which would have failed.)
        return bound[0]\
            .associators(wmi_result_class='Msvm_SwitchPort')[0]\
            .associators(wmi_result_class='Msvm_VirtualSwitch')[0]

    def plug(self, instance, vif):
        """Create a port on the external vswitch and connect the VM's NIC to it.

        :raises HyperVException: when no vswitch is found or the port
            cannot be created.
        """
        extswitch = self._find_external_network()
        if extswitch is None:
            raise vmutils.HyperVException(_('Cannot find vSwitch'))

        vm_name = instance['name']

        # The synthetic NIC settings object created for this VIF.
        nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData(
            ElementName=vif['id'])[0]

        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        # Create a port on the vswitch.
        (new_port, ret_val) = switch_svc.CreateSwitchPort(
            Name=str(uuid.uuid4()),
            FriendlyName=vm_name,
            ScopeOfResidence="",
            VirtualSwitch=extswitch.path_())
        if ret_val != 0:
            LOG.error(_('Failed creating a port on the external vswitch'))
            raise vmutils.HyperVException(_('Failed creating port for %s') %
                                          vm_name)
        ext_path = extswitch.path_()
        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
                  % locals())

        # Connect the NIC to the new port.
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        nic_data.Connection = [new_port]
        self._vmutils.modify_virt_resource(self._conn, nic_data, vm)

    def unplug(self, instance, vif):
        #TODO(alepilotti) Not implemented
        pass
|
UTF-8
|
Python
| false | false | 2,013 |
17,686,675,359,248 |
5f489efa8461791bd1255c4854a21949e5b4eb88
|
e1e5ff980f91226f51d4df7603b9dd9301b0e500
|
/sudokuSolver.py
|
3dd8a179be5bb16e0e14aa57c3289d7959b7654e
|
[] |
no_license
|
avdg/pysudokudemo
|
https://github.com/avdg/pysudokudemo
|
3ac5a96dc6feda37968fe68113cd821532b6aff1
|
d16a7db5ae06af2b2e8ad5850efc0e61b340dc03
|
refs/heads/master
| 2020-05-16T22:01:49.318499 | 2011-06-19T13:49:26 | 2011-06-19T13:49:26 | 38,217,451 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Specialised in solving sudoku's
"""
class SudokuSolver:
    """Solver for standard 9x9 sudoku grids.

    Each cell is stored as a list of still-possible values: a solved
    cell holds exactly one value, an empty (contradictory) list means
    the grid is unsolvable.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset the solver to an empty 9x9 grid with all candidates open."""
        self.sudoku = [[list(range(1, 10)) for _ in range(9)]
                       for _ in range(9)]
        # Coordinate groups ([first, second] index pairs into self.sudoku)
        # for every row, column and 3x3 block.
        rows = [[[b, a] for b in range(9)] for a in range(9)]
        columns = [[[a, b] for b in range(9)] for a in range(9)]
        blocks = [[[(b // 3) + (a // 3) * 3, (b % 3) + (a % 3) * 3]
                   for b in range(9)] for a in range(9)]
        self.relation = {'rows': rows, 'columns': columns, 'blocks': blocks}

    def setSudoku(self, sudoku):
        """Load a 2D grid (0 = empty cell) into the candidate representation."""
        self.sudoku = [[list(range(1, 10)) for _ in range(9)]
                       for _ in range(9)]
        for a, row in enumerate(sudoku):
            for b, value in enumerate(row):
                if value != 0:
                    self.sudoku[a][b] = [value]

    def getSudoku(self):
        """Return a 2D grid; unsolved cells come back as 0."""
        grid = [[0] * 9 for _ in range(9)]
        for a, row in enumerate(self.sudoku):
            for b, candidates in enumerate(row):
                if len(candidates) == 1:
                    grid[a][b] = candidates[0]
        return grid

    def solve(self):
        """Run the solving passes: propagate singles, then twin elimination."""
        self.solveSolved()
        if self.isSolved() != True:
            self.cleanupTwins()
            self.solveSolved()

    def isSolved(self):
        """Return True, 'Not solved', or 'Error' (some cell has no candidates)."""
        open_cells = 0
        for row in self.sudoku:
            for candidates in row:
                if len(candidates) == 0:
                    return 'Error'
                if len(candidates) != 1:
                    open_cells += 1
        return True if open_cells == 0 else 'Not solved'

    def solveSolved(self):
        """Repeatedly eliminate solved values from their peers until stable."""
        changed = True
        while changed:
            changed = False
            for a, row in enumerate(self.sudoku):
                for b, candidates in enumerate(row):
                    if len(candidates) == 1 and self.markCellSolved(a, b):
                        changed = True

    def cellInBlockRow(self, x, y, blockKey, blockRow):
        """Return True when cell [x, y] belongs to the given relation group."""
        return [x, y] in self.relation[blockKey][blockRow]

    def markCellSolved(self, x, y):
        """Remove this cell's value from all of its peers.

        Returns True if any candidate was actually removed.
        """
        if len(self.sudoku[x][y]) != 1:
            return False
        value = self.sudoku[x][y][0]
        changed = False
        for groupName, groups in self.relation.items():
            for index, _group in enumerate(groups):
                if not self.cellInBlockRow(x, y, groupName, index):
                    continue
                for peer in self.relation[groupName][index]:
                    if peer[0] == x and peer[1] == y:
                        continue
                    if value in self.sudoku[peer[0]][peer[1]]:
                        self.sudoku[peer[0]][peer[1]].remove(value)
                        changed = True
        return changed

    def cleanupTwins(self):
        """Naked-twin elimination.

        When two values can each only go in the same two cells of a
        group, those two cells must hold exactly those two values, so
        every other candidate is stripped from them.
        """
        for groupName, groups in self.relation.items():
            for index, group in enumerate(groups):
                # twins[v] collects the cell pairs where v fits in
                # exactly two places of this group.
                twins = [[] for _ in range(10)]
                for value in range(1, 10):
                    spots = [[c[0], c[1]] for c in group
                             if value in self.sudoku[c[0]][c[1]]]
                    if len(spots) == 2:
                        twins[value].append(spots)
                for value in range(1, 10):
                    for pair in twins[value]:
                        for other in range(value + 1, 10):
                            for otherPair in twins[other]:
                                if pair != otherPair:
                                    continue
                                for cell in pair:
                                    for candidate in range(1, 10):
                                        if candidate == value or candidate == other:
                                            continue
                                        if candidate not in self.sudoku[cell[0]][cell[1]]:
                                            continue
                                        self.sudoku[cell[0]][cell[1]].remove(candidate)
|
UTF-8
|
Python
| false | false | 2,011 |
12,970,801,275,439 |
7273c36c857014b66eae297b5ac7e7e6e501a6c4
|
2df85b0a804944f784918c860a0a6f936519689e
|
/learning/second.py
|
e5da5e5875d2f09c3b14fae7f53c108eb1380f6c
|
[] |
no_license
|
arpithpathange/Python
|
https://github.com/arpithpathange/Python
|
ad3159c21012a5eefe696787dfe3f0d9937d1bbd
|
c2368bb6bc38569ec52ba731c22918852184d3f5
|
refs/heads/master
| 2021-01-25T08:37:52.630977 | 2014-06-21T19:55:58 | 2014-06-21T19:55:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
import os
def main():
    """Entry point: announce itself, then delegate to first()."""
    print("In the Main function")
    first()
def first():
    """Middle link of the call chain: prints, then calls sec()."""
    print("In first function")
    sec()
def sec():
    """Last link of the call chain: just prints a marker line."""
    print("In sec")
if __name__ == '__main__':
    # Demonstrate the call chain five times.
    for _ in range(5):
        main()
|
UTF-8
|
Python
| false | false | 2,014 |
10,780,367,948,117 |
06f60d6f9958ea9b53a510cde68914400ab834e2
|
3b2f94467f0ab99d8971a9b79ffa725226bce488
|
/boole/interfaces/ineqs/classes.py
|
52c9d3d0791f7987fa890a60f59f4c54ad22480d
|
[
"Apache-2.0"
] |
permissive
|
avigad/boole
|
https://github.com/avigad/boole
|
482534716e2a3bbe934a86b242888225d7f91ae2
|
2a436c2967dbc968f6a5877c220b9757c3bc17c3
|
refs/heads/master
| 2020-04-30T23:41:44.127209 | 2014-05-09T16:06:54 | 2014-05-09T16:06:54 | 9,526,144 | 18 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from fractions import Fraction, gcd
from string import find, count, split
# use this for errors in this module
class Error(Exception):
    """Base class for errors raised by this module."""
    pass
class Contradiction(Error):
    """Raised when the hypotheses are jointly unsatisfiable."""
    pass
# kinds of inequalities
# Kinds of inequalities, numbered so that mirrored pairs sum to 3
# (GT=0 <-> LT=3, GE=1 <-> LE=2); comp_reverse relies on this.
GT, GE, LE, LT = range(4)
# Printable symbol for each comparison kind.
comp_str = {GT: '>', GE: '>=', LT: '<', LE: '<='}
# swaps GT and LT, GE and LE
def comp_reverse(i):
    """Return the mirror of a comparison kind: GT<->LT, GE<->LE.

    Works because the kinds are numbered 0..3 with mirrored pairs
    summing to 3.
    """
    mirrored = 3 - i
    return mirrored
# Provenance tags: how each derived fact was obtained
# (additive step, multiplicative step, hypothesis, function axiom).
ADD, MUL, HYP, FUN = range(4)
###############################################################################
#
# TERMS
#
# Add_pair(a1, t1) represents a1 * t1
#
# Add_term([(a1, t1), ..., (ak, tk)]) represents a1 * t1 + ... + ak * tk
# stored internally as a list of Add_pair's
#
# Mul_pair((t1, n1)) represents t1 ^ n1
#
# Mul_term([(t1, n1), ..., (tk, nk)]) represents t1 ^ n1 * .... * tk ^ nk
# stored internally as a list of Mul_pairs
#
# Func_term(f,[t1,...,tn]) represents f(t1,t2,...,tn)
#
# An ordering on expressions is defined recursively, using Python's
# built-in lexicographic orderings on pairs and lists
#
# TODO: canonize should check for duplicates and combine them
# TODO: complete documentation
###############################################################################
class Term:
    """Abstract base class for all term kinds.

    Subclasses implement the arithmetic primitives (__add__, __mul__,
    __div__, __pow__, __str__); this base class derives the remaining
    operators from them.
    """
    def __repr__(self):
        return self.__str__()

    def __str__(self):
        raise NotImplementedError()

    def __truediv__(self, other):
        # Bug fix: delegate explicitly to __div__.  The original
        # `return self / other` only worked because Python 2 classic
        # division dispatched `/` to __div__; under true division it
        # would recurse into __truediv__ forever.
        return self.__div__(other)

    def __rtruediv__(self, other):
        return other * self ** (-1)

    def __rdiv__(self, other):
        # NOTE(review): operand order differs from __rtruediv__
        # (`x * other` vs `other * x`); presumably equivalent for these
        # term classes — confirm before unifying.
        return (self ** (-1)) * other

    def __neg__(self):
        return self * (-1)

    def __sub__(self, other):
        return self + other * (-1)

    def __rsub__(self, other):
        return (-1) * self + other

    def __rmul__(self, other):
        return self * other

    def __radd__(self, other):
        return self + other
class Const(Term):
    """A rational constant, stored by its string representation.

    Constants sort before every other term kind (see __cmp__).
    """
    def __init__(self, name):
        # name: string form of the number, e.g. "1", "3/2".
        self.name = name

    def __str__(self):
        return "Const({0!s})".format(self.name)

    def __cmp__(self, other):
        # Python 2 ordering: constants compare by name among themselves
        # and come before all non-Const terms.
        if isinstance(other, Const):
            return cmp(self.name, other.name)
        else:
            return -1

    def __mul__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return Const("0")
            elif other == 1:
                return self
            else:
                # Fold numeric multiplication into a new constant.
                num = Fraction(self.name)
                return Const(str(num * other))
        # Defer to the other term's __mul__ for non-numeric operands.
        return other * self

    def __add__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return self
            return Add_term([Add_pair(1, self), Add_pair(other, one)])
        if isinstance(other, Add_term):
            # NOTE(review): this aliases and mutates other.addpairs in
            # place — shared-state hazard; confirm intended.
            addpairs = other.addpairs
            coeff = 1
            pair = next((p for p in addpairs if p.term == self), None)
            if pair:
                addpairs.remove(pair)
                coeff = pair.coeff + 1
            addpairs.append(Add_pair(coeff, self))
            return Add_term(addpairs)
        return Add_term([Add_pair(1, self), Add_pair(1, other)])

    def __pow__(self, other):
        if not isinstance(other, (int, float, Fraction)):
            raise Exception("Cannot have variables in the exponent")
        if other == 0:
            return one
        if other == 1:
            return self
        return Mul_term(Mul_pair(self, other))

    def __div__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                raise Exception("Cannot divide by 0")
            if other == 1:
                return self
            # Exact reciprocal: floats go through Fraction first.
            coeff = (1 / Fraction(other) if isinstance(other, float)\
                else Fraction(1, other))
            return Add_term([Add_pair(coeff, self)])
        return self * other ** (-1)

    def structure(self):
        # Tag used when printing the shape of a term tree.
        return "Const"
# Shared constant terms for 1 and 0.
one = Const("1")
zero = Const("0")
class Var(Term):
    """A named problem variable.

    Variables sort after constants and before composite terms.
    """
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def __cmp__(self, other):
        # Python 2 ordering: after Const, by name among Vars,
        # before everything else.
        if isinstance(other, Const):
            return 1
        elif isinstance(other, Var):
            return cmp(self.name, other.name)
        else:
            return -1

    def __mul__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return zero
            if other == 1:
                return self
            return Add_term([Add_pair(other, self)])
        if isinstance(other, Mul_term):
            # NOTE(review): aliases and mutates other.mulpairs in place —
            # shared-state hazard; confirm intended.
            mulpairs = other.mulpairs
            mulpairs.append(Mul_pair(self, 1))
            return Mul_term(mulpairs)
        return Mul_term([Mul_pair(self, 1), Mul_pair(other, 1)])

    def __add__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return self
            return Add_term([Add_pair(1, self), Add_pair(other, one)])
        if isinstance(other, Add_term):
            # NOTE(review): mutates other.addpairs in place, like
            # Const.__add__ — confirm intended.
            addpairs = other.addpairs
            coeff = 1
            pair = next((p for p in addpairs if p.term == self), None)
            if pair:
                addpairs.remove(pair)
                coeff = pair.coeff + 1
            addpairs.append(Add_pair(coeff, self))
            return Add_term(addpairs)
        return Add_term([Add_pair(1, self), Add_pair(1, other)])

    def __pow__(self, other):
        if not isinstance(other, (int, float, Fraction)):
            raise Exception("Cannot have variables in the exponent")
        if other == 0:
            return one
        if other == 1:
            return self
        return Mul_term(Mul_pair(self, other))

    def __div__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                raise Exception("Cannot divide by 0")
            if other == 1:
                return self
            # Bug fix: was Fraction(1 / other), which under Python 2
            # integer division yields Fraction(0) for any int other > 1.
            # Use the two-argument form, consistent with Const.__div__.
            coeff = (1 / Fraction(other) if isinstance(other, float)\
                else Fraction(1, other))
            return Add_term([Add_pair(coeff, self)])
        return self * other ** (-1)

    def structure(self):
        # Tag used when printing the shape of a term tree.
        return "Var"
class Add_pair:
    """A coefficient/term pair representing coeff * term."""
    def __init__(self, coeff, term):
        self.coeff = coeff
        self.term = term

    def __str__(self):
        if self.coeff == 1:
            return str(self.term)
        elif self.term == one:
            return str(self.coeff)
        else:
            return str(self.coeff) + "*" + str(self.term)

    def __repr__(self):
        return self.__str__()

    def __cmp__(self, other):
        # Order by term first, then coefficient (lexicographic pair).
        return cmp((self.term, self.coeff), (other.term, other.coeff))

    # used only to scale an addpair by a constant
    def __div__(self, factor):
        # Route through Fraction so the division stays exact.
        num = (Fraction(self.coeff) if isinstance(self.coeff, float)\
            else self.coeff)
        denom = (Fraction(factor) if isinstance(factor, float) else factor)
        return Add_pair(Fraction(num, denom), self.term)

    def __mul__(self, factor):
        return Add_pair(self.coeff * factor, self.term)

    # this is useful for canonization
    def __pow__(self, n):
        # Note: the resulting .term is a Mul_pair, not a Term — canonize
        # consumes these pairs directly.
        return Add_pair(pow(self.coeff, n), Mul_pair(self.term, n))
class Add_term(Term):
    """A sum a1*t1 + ... + ak*tk, stored as a list of Add_pairs."""
    def __init__(self, l):
        # Accept a bare Term, a single Add_pair, or a list of either
        # Add_pairs or (coeff, term) tuples.
        if isinstance(l, Term):
            self.addpairs = [Add_pair(1, l)]
        elif isinstance(l, Add_pair):
            self.addpairs = [l]
        elif isinstance(l, list):
            if not l:
                self.addpairs = l
            elif isinstance(l[0], Add_pair):
                self.addpairs = l
            else:
                self.addpairs = [Add_pair(p[0], p[1]) for p in l]
        else:
            raise Error("Add_term of:" + str(l))

    def __str__(self):
        return ("(" + " + ".join([str(a) for a in self.addpairs]) + ")")

    def __cmp__(self, other):
        # Order: after Const/Var, before multiplicative terms.
        if isinstance(other, (Const, Var)):
            return 1
        elif isinstance(other, Add_term):
            return cmp(self.addpairs, other.addpairs)
        else:
            return -1

    # used to scale by a constant
    def __div__(self, factor):
        if isinstance(factor, (int, float, Fraction)):
            return Add_term([s / (Fraction(factor)\
                if isinstance(factor, float) else factor)\
                for s in self.addpairs])
        return self * factor ** (-1)

    def __mul__(self, factor):
        if isinstance(factor, (int, float, Fraction)):
            return Add_term([s * factor for s in self.addpairs])
        if isinstance(factor, Mul_term):
            # NOTE(review): aliases and mutates factor.mulpairs in
            # place — confirm intended.
            mulpairs = factor.mulpairs
            mulpairs.append(Mul_pair(self, 1))
            return Mul_term(mulpairs)
        return self * Mul_term([Mul_pair(factor, 1)])

    def __add__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return self
            return self +Add_term([Add_pair(other, one)])
        if isinstance(other, Add_term):
            # Merge like terms; cancelling pairs (a.coeff == -b.coeff)
            # are dropped entirely.
            addpairs = []
            addpairs.extend(self.addpairs)
            for a in other.addpairs:
                for b in addpairs:
                    if b.term == a.term:
                        addpairs.remove(b)
                        if a.coeff != -b.coeff:
                            addpairs.append(Add_pair(a.coeff + b.coeff, a.term))
                        break
                else:
                    # for-else: no matching term found, keep the pair.
                    addpairs.append(a)
            # if not addpairs:
            # print self, other
            # raise Error("Add_term zero")
            # return zero
            return(Add_term(addpairs))
        return self +Add_term([Add_pair(1, other)])

    def __pow__(self, other):
        if not isinstance(other, (int, float, Fraction)):
            raise Exception("Cannot have variables in the exponent")
        if other == 0:
            return one
        if other == 1:
            return self
        return Mul_term(Mul_pair(self, other))

    def structure(self):
        # Shape string, e.g. "AddTerm(Var,Const)".
        s = "AddTerm("
        for t in self.addpairs:
            s += t.term.structure() + ","
        s = s[:-1] + ")"
        return s
class Mul_pair:
    """An exponent pair: `term` raised to `exp`."""

    def __init__(self, term, exp):
        self.term = term
        self.exp = exp

    def __str__(self):
        return str(self.term) if self.exp == 1 \
            else str(self.term) + "^" + str(self.exp)

    def __repr__(self):
        return str(self)

    def __cmp__(self, other):
        # Python 2 ordering: lexicographic on the (term, exp) pair.
        return cmp((self.term, self.exp), (other.term, other.exp))

    def __pow__(self, n):
        # Exponents distribute over integer powers, and over Fraction
        # powers with an odd denominator (odd roots preserve sign);
        # otherwise the whole pair is wrapped in a Mul_term first.
        distributes = isinstance(n, int) or \
            (isinstance(n, Fraction) and n.denominator % 2 == 1)
        if distributes:
            return Mul_pair(self.term, self.exp * n)
        return Mul_pair(Mul_term([self]), n)
# allow a constant multiplier, for the multiplicative part
# allow a constant multiplier, for the multiplicative part
class Mul_term(Term):
    """A product const * t1^n1 * ... * tk^nk, stored as Mul_pairs."""
    def __init__(self, l, const=1):
        self.const = const
        # Accept a bare Term, a single Mul_pair, or a list of either
        # Mul_pairs or (term, exp) tuples.
        if isinstance(l, Term):
            self.mulpairs = [Mul_pair(l, 1)]
        elif isinstance(l, Mul_pair):
            self.mulpairs = [l]
        elif isinstance(l, list):
            if not l:
                self.mulpairs = l
            elif isinstance(l[0], Mul_pair):
                self.mulpairs = l
            else:
                self.mulpairs = [Mul_pair(p[0], p[1]) for p in l]
        else:
            raise Error("Mul_term of: " + str(l))
        # Sanity check: every element must be a Mul_pair.
        for item in self.mulpairs:
            if not isinstance(item, Mul_pair):
                print item, 'is not a mul_pair!'
                raise Exception

    def __str__(self):
        if self.const == 1:
            factorlist = []
        else:
            factorlist = [str(self.const)]
        factorlist.extend([str(m) for m in self.mulpairs])
        return "(" + " * ".join(factorlist) + ")"

    def __cmp__(self, other):
        # Multiplicative terms sort after Const, Var and Add_term.
        if isinstance(other, (Const, Var, Add_term)):
            return 1
        else:
            return cmp(self.mulpairs, other.mulpairs)

    def __mul__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return zero
            con = self.const * other
            return Mul_term(self.mulpairs, con)
        if isinstance(other, Mul_term):
            # Merge like factors; pairs with cancelling exponents
            # (a.exp == -b.exp) are dropped entirely.
            mulpairs = list(self.mulpairs)
            for a in other.mulpairs:
                for b in mulpairs:
                    if b.term == a.term:
                        mulpairs.remove(b)
                        if a.exp != -b.exp:
                            mulpairs.append(Mul_pair(a.term, a.exp + b.exp))
                        break
                else:
                    # for-else: no matching factor found, keep the pair.
                    mulpairs.append(a)
            return Mul_term(mulpairs, self.const * other.const)
        return self * Mul_term([Mul_pair(other, 1)])

    def __add__(self, other):
        if isinstance(other, (int, float, Fraction)):
            if other == 0:
                return self
            return Add_term([Add_pair(other, one)]) + self
        if isinstance(other, Mul_term):
            return Add_term([Add_pair(1, self), Add_pair(1, other)])
        return other + self

    def __pow__(self, n):
        if not isinstance(n, (int, float, Fraction)):
            raise Exception("Cannot have variables in the exponent")
        # Distribute the exponent over every factor and the constant.
        mulpairs = [pow(m, n) for m in self.mulpairs]
        return Mul_term(mulpairs, pow(Fraction(self.const), n))

    def __div__(self, other):
        return self * pow(other, -1)

    def structure(self):
        # Shape string, e.g. "MulTerm(Var,AddTerm(...))".
        s = "MulTerm("
        for t in self.mulpairs:
            s += t.term.structure() + ","
        s = s[:-1] + ")"
        return s
class Func_term(Term):
    """An uninterpreted function application const * f(t1, ..., tn)."""
    def __init__(self, name, args, const=1):
        self.name = name
        self.args = []
        for a in args:
            if isinstance(a, Term):
                self.args.append(a)
            else:
                # Debug fallback: try to evaluate non-Term arguments.
                # NOTE(review): eval on an arbitrary value — presumably a
                # leftover debugging path; confirm it is ever reached.
                print 'a is not a term, but a... ?', type(a)
                self.args.append(eval(a))
        self.const = const

    def __add__(self, other):
        if isinstance(other, Add_term):
            return other + self
        # Identical applications merge by adding their constants.
        if isinstance(other, Func_term) and\
            other.name == self.name and other.args == self.args:
            if other.const + self.const == 0:
                return zero
            return Func_term(self.name, self.args, other.const + self.const)
        return Add_term([Add_pair(1, other)]) + self

    def __mul__(self, other):
        if isinstance(other, (int, float, Fraction)):
            return Func_term(self.name, self.args, self.const * other)
        if isinstance(other, Mul_term):
            return other * self
        return Mul_term([Mul_pair(other, 1)]) * self

    def __div__(self, other):
        return self * pow(other, -1)

    def __pow__(self, n):
        if not isinstance(n, (int, float, Fraction)):
            raise Exception("Cannot have variables in the exponent")
        return Mul_term([Mul_pair(self, n)])

    def __cmp__(self, other):
        # Order by function name, then by argument list; function terms
        # sort after every non-Func_term.
        if isinstance(other, Func_term):
            if other.name != self.name:
                return cmp(self.name, other.name)
            return cmp(self.args, other.args)
        return 1

    def __str__(self):
        s = ('' if self.const == 1 else str(self.const) + '*') + self.name + '('
        for a in self.args:
            s += str(a) + ', '
        s = s[:-2] + ')'
        return s

    def structure(self):
        # Shape string, e.g. "Func_term(Var,Const)".
        s = ('' if self.const == 1 else str(self.const)) + 'Func_term('
        for a in self.args:
            s += a.structure() + ','
        s = s[:-1] + ')'
        return s
###############################################################################
#
# COMPARISON CLASSES
#
###############################################################################
# Comparison and its subclasses are used in the Boole interface.
class Comparison():
    """Base class for a binary comparison `left dir right`.

    Used by the Boole interface; subclasses fix `dir` to one of
    GT/GE/LT/LE and implement neg().
    """

    def __init__(self):
        self.dir = None
        self.left = None
        self.right = None

    # Returns a canonized Zero_comparison
    def canonize(self):
        """Rewrite `left dir right` as a canonized `term dir 0`."""
        difference = self.left - self.right
        return canonize_zero_comparison(Zero_comparison(difference, self.dir))

    def __str__(self):
        return "{0!s}{1!s}{2!s}"\
            .format(self.left, comp_str[self.dir], self.right)

    def neg(self):
        """Return the negated comparison
        """
        raise NotImplementedError()
class CompGT(Comparison):
    """Strict greater-than comparison: left > right."""

    # Left and right are terms
    def __init__(self, left, right):
        Comparison.__init__(self)
        self.left = left
        self.right = right
        self.dir = GT

    def neg(self):
        # not (l > r)  <=>  l <= r
        return CompLE(self.left, self.right)
class CompGE(Comparison):
    """Weak greater-than comparison: left >= right."""

    # Left and right are terms
    def __init__(self, left, right):
        Comparison.__init__(self)
        self.left = left
        self.right = right
        self.dir = GE

    def neg(self):
        # not (l >= r)  <=>  l < r
        return CompLT(self.left, self.right)
class CompLT(Comparison):
    """Strict less-than comparison: left < right."""

    # Left and right are terms
    def __init__(self, left, right):
        Comparison.__init__(self)
        self.left = left
        self.right = right
        self.dir = LT

    def neg(self):
        # not (l < r)  <=>  l >= r
        return CompGE(self.left, self.right)
class CompLE(Comparison):
    """Weak less-than comparison: left <= right."""

    # Left and right are terms
    def __init__(self, left, right):
        Comparison.__init__(self)
        self.left = left
        self.right = right
        self.dir = LE

    def neg(self):
        # not (l <= r)  <=>  l > r
        return CompGT(self.left, self.right)
# Comparison between one term a_i and 0
# a_i comp 0
class Zero_comparison_data:
    """Comparison of a single term against zero: `term comp 0`.

    Only the comparison kind and provenance are stored; the term is
    supplied when rendering.
    """

    def __init__(self, comp, provenance=None):
        self.comp = comp
        self.provenance = provenance

    def to_string(self, term):
        return ' '.join([str(term), comp_str[self.comp], '0'])
# comparison between two terms, a_i and a_j
# a_i comp coeff * a_j
# comparison between two terms, a_i and a_j
# a_i comp coeff * a_j
class Comparison_data:
    """A fact of the form `a_i comp coeff * a_j` (terms held externally)."""
    def __init__(self, comp, coeff=1, provenance=None):
        self.comp = comp
        self.coeff = coeff
        self.provenance = provenance

    def to_string(self, term1, term2):
        if self.coeff == 1:
            return str(term1) + ' ' + comp_str[self.comp] + ' ' + str(term2)
        else:
            return (str(term1) + ' ' + comp_str[self.comp] + ' ' + \
                str(self.coeff) + '*' + str(term2))

    def __str__(self):
        return 'comparison: ' + comp_str[self.comp] + ' ' + str(self.coeff)

    def __repr__(self):
        return self.__str__()

    # used to figure out strength of inequalities
    def ge(self, other):
        """True if this fact is at least as strong an upper/lower bound
        as `other` in the 'greater' direction."""
        # Opposite-direction comparisons are treated as incomparable-strong.
        if (self.comp in [LT, LE] and other.comp in [GT, GE]) \
            or (self.comp in [GT, GE] and other.comp in [LT, LE]):
            return True
        return self.coeff > other.coeff \
            or (self.coeff == other.coeff and self.comp in [LT, GT] \
            and other.comp in [LE, GE])

    def le(self, other):
        """Mirror of ge(): strength in the 'lesser' direction."""
        if (self.comp in [LT, LE] and other.comp in [GT, GE]) \
            or (self.comp in [GT, GE] and other.comp in [LT, LE]):
            return True
        return self.coeff < other.coeff \
            or (self.coeff == other.coeff and self.comp in [LT, GT] \
            and other.comp in [LE, GE])

    def __cmp__(self, other):
        # NOTE(review): returns only 0 (equal) or 1 — not a total order,
        # so sorting with this is unreliable; it only supports equality
        # checks.  Confirm no caller sorts Comparison_data objects.
        if self.coeff == other.coeff and self.comp == other.comp:
            return 0
        return 1
# Stores term comp 0
# Used in the additive routine
class Zero_comparison:
    """A fact `term comp 0`, used by the additive routine."""

    def __init__(self, term, comp):
        self.term = term
        self.comp = comp

    def __str__(self):
        return ' '.join([str(self.term), comp_str[self.comp], '0'])

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return isinstance(other, Zero_comparison) \
            and self.comp == other.comp and self.term == other.term
# The multiplicative procedure makes use of inequalities like t > 1, where
# t is a Mul_term.
class One_comparison:
    """A fact `term comp 1`, used by the multiplicative routine."""

    def __init__(self, term, comp):
        self.term = term
        self.comp = comp

    def __str__(self):
        return ' '.join([str(self.term), comp_str[self.comp], '1'])

    def __repr__(self):
        return str(self)
###############################################################################
#
# CANONIZING TERMS
#
# A canonical term is one of the following
# a variable or the constant 1
# an additive term ((a1, t1), ..., (ak, tk)) where
# each ti is a canonical term
# (variable, the constant 1, or multiplicative)
# t1 < t2 < ... < tk
# a1 = 1 (so the term is normalized)
# a multiplicative term ((t1, n1), ..., (tk, nk)) where
# each ti is a canonical term (variable or additive)
# n1 < n2 < ... < nk
#
# Add_pair(r, t) is said to be canonical if t is a canonical term.
#
# "canonize" converts any term to a canonical Add_pair
#
# The order for sorting is built into the term classes.
#
###############################################################################
def product(l):
    """Multiply the elements of l together (1 for an empty sequence)."""
    result = 1
    for factor in l:
        result = result * factor
    return result
# returns an Add_pair
# returns an Add_pair
def canonize(t):
    """Recursively convert term t to a canonical Add_pair(coeff, term).

    The returned term is normalized: an Add_term has sorted addpairs
    with leading coefficient 1; a Mul_term has sorted mulpairs with the
    numeric content pulled out into the coefficient.
    """
    if isinstance(t, Const) or isinstance(t, Var):
        return Add_pair(1, t)
    elif isinstance(t, Add_term):
        addpairs = [canonize(p.term) * p.coeff for p in t.addpairs]
        addpairs.sort()
        # Normalize so the first (smallest) pair has coefficient 1.
        coeff = addpairs[0].coeff
        if coeff == 0:
            # Debug trace for a degenerate leading coefficient.
            print t, addpairs
        term = Add_term([p / coeff for p in addpairs])
        # A one-element sum collapses to its single term.
        if len(term.addpairs) == 1:
            coeff = coeff * term.addpairs[0].coeff
            term = term.addpairs[0].term
        return Add_pair(coeff, term)
    elif isinstance(t, Mul_term):
        mulpairs = [pow(canonize(p.term), p.exp) for p in t.mulpairs]
        mulpairs.sort()
        # Collect the numeric part of every factor into one coefficient.
        coeff = product([p.coeff for p in mulpairs]) * t.const
        term = Mul_term([p.term for p in mulpairs])
        return Add_pair(coeff, term)
    elif isinstance(t, Func_term):
        # Canonize arguments in place; constants fold back into the args.
        args = t.args
        nargs = []
        for p in args:
            cp = canonize(p)
            if cp.coeff == 1:
                nargs.append(cp.term)
            else:
                nargs.append(cp.coeff * cp.term)
        term = Func_term(t.name, nargs, 1)
        return Add_pair(t.const, term)
def test_canonize():
    """Manual smoke test: print comparisons and canonizations of a few
    composite terms (t1 and t3 are the same term written differently)."""
    x = Var("x")
    y = Var("y")
    z = Var("z")
    t1 = Mul_term([(Add_term([(2, x), (-3, y), (1, z)]), 3), (x, 2)])
    t2 = Mul_term([(Add_term([(2, x), (-5, y), (1, z)]), 3), (x, 2)])
    t3 = Mul_term([(x, 2), (Add_term([(-3, y), (1, z), (2, x)]), 3)])
    print "t1 =", t1
    print "t2 =", t2
    print "t3 =", t3
    print "t1 < t2:", t1 < t2
    print "t1 < x:", t1 < x
    print "t1 == t3:", t1 == t3
    print "Canonize t1:", canonize(t1)
    print "Canonize t2:", canonize(t2)
    print "Canonize t3:", canonize(t3)
    print "Canonize x:", canonize(x)
    print "canonize(t1) == canonize(t2):", canonize(t1) == canonize(t3)
# Takes an (uncanonized) Zero_comparison.
# Returns a canonized Zero_comparison with positive coefficient.
def canonize_zero_comparison(h):
    """Canonize an (uncanonized) Zero_comparison.

    Returns a Zero_comparison whose term has a positive leading
    coefficient; a negative coefficient flips the comparison direction.
    Raises Error if the term canonizes to zero.
    """
    canon = canonize(h.term)
    if canon.coeff > 0:
        return Zero_comparison(canon.term, h.comp)
    if canon.coeff < 0:
        return Zero_comparison(canon.term, comp_reverse(h.comp))
    raise Error("0 in hypothesis")
###############################################################################
#
# NAMING SUBTERMS
#
# The heuristic procedure starts by naming all subterms. We'll use
# "IVars" for the name, e.g. a0, a1, a2, ...
#
###############################################################################
# internal variables -- just an index
# internal variables -- just an index
class IVar(Term, Var):
    """Internal variable a<index> naming the i-th subterm.

    The *_rel methods query a Heuristic_data object H for known order
    relations between the named subterms.  By convention a0 names the
    constant 1, so comparisons with index 0 consult H.zero_comparisons.
    """
    def __init__(self, index):
        Var.__init__(self, "a" + str(index))
        self.index = index

    def __str__(self):
        return self.name

    def __cmp__(self, other):
        # Python 2 ordering: after Const, by index among Vars/IVars,
        # before composite terms.
        if isinstance(other, Const):
            return 1
        elif isinstance(other, Var):
            return cmp(self.index, other.index)
        else:
            return -1

    def __eq__(self, other):
        if isinstance(other, IVar):
            return self.index == other.index
        return False

    def __ne__(self, other):
        if isinstance(other, IVar):
            return self.index != other.index
        return True

    # Looks in Heuristic_data H to see if self < other is known.
    def lt_rel(self, other, H):
        """Return True if H proves a_i < a_j (False means unknown)."""
        i, j = self.index, other.index
        # Normalize so i <= j; the mirrored query handles i > j.
        if i > j:
            return other.gt_rel(self, H)
        if i == j:
            return False
        # a0 is the constant 1: 1 < a_j iff a_j > 0 is recorded.
        if i == 0 and j in H.zero_comparisons.keys():
            if H.zero_comparisons[j].comp == GT:
                return True
            return False
        signi, signj = H.sign(i), H.sign(j)
        wsigni, wsignj = H.weak_sign(i), H.weak_sign(j)
        if wsignj != 0:
            if signi == -1 and signj == 1:
                return True
            if signi == 1 and signj == -1:
                return False
            # both signs are the same.
            # Check recorded comparisons a_i comp coeff * a_j; what
            # counts as "strictly less" depends on the sign of a_j.
            if (i, j) in H.term_comparisons.keys():
                comps = H.term_comparisons[i, j]
                for c in comps:
                    if ((wsignj == 1 and ((c.comp == LT and c.coeff <= 1)\
                        or (c.comp == LE and c.coeff < 1))) or
                        (wsignj == -1 and ((c.comp == LT and (c.coeff < 0 or c.coeff >= 1))
                        or (c.comp == LE and (c.coeff < 0 or c.coeff > 1))))):
                        return True
            return False
        # sign info on right is unknown
        # Need comparisons covering both possible signs of a_j.
        if (i, j) in H.term_comparisons.keys():
            comps = H.term_comparisons[i, j]
            if (any((c.comp == LT and c.coeff <= 1) or (c.comp == LE and c.coeff < 1)\
                for c in comps) and \
                any(((c.comp == LT and (c.coeff < 0 or c.coeff >= 1))\
                or (c.comp == LE and (c.coeff < 0 or c.coeff > 1)))\
                for c in comps)):
                return True
        return False

    def gt_rel(self, other, H):
        """Return True if H proves a_i > a_j (False means unknown)."""
        i, j = self.index, other.index
        if i > j:
            return other.lt_rel(self, H)
        if i == j:
            return False
        # a0 is the constant 1: 1 > a_j iff a_j < 0 is recorded.
        if i == 0 and j in H.zero_comparisons.keys():
            if H.zero_comparisons[j].comp == LT:
                return True
            return False
        signi, signj = H.sign(i), H.sign(j)
        wsigni, wsignj = H.weak_sign(i), H.weak_sign(j)
        if wsignj != 0:
            if signi == -1 and signj == 1:
                return False
            if signi == 1 and signj == -1:
                return True
            # both signs are the same.
            if (i, j) in H.term_comparisons.keys():
                comps = H.term_comparisons[i, j]
                for c in comps:
                    if ((wsignj == 1 and ((c.comp == GT and c.coeff >= 1)\
                        or (c.comp == GE and c.coeff > 1))) or
                        (wsignj == -1 and ((c.comp == GT and c.coeff <= 1)\
                        or (c.comp == GE and c.coeff < 1)))):
                        return True
            return False
        # sign info on right is unknown
        if (i, j) in H.term_comparisons.keys():
            comps = H.term_comparisons[i, j]
            if (any((c.comp == GT and c.coeff >= 1)\
                or (c.comp == GE and c.coeff > 1) for c in comps) and
                any((c.comp == GT and c.coeff <= 1)\
                or (c.comp == GE and c.coeff < 1) for c in comps)):
                return True
        return False

    def le_rel(self, other, H):
        """Return True if H proves a_i <= a_j (False means unknown)."""
        i, j = self.index, other.index
        if i > j:
            return other.ge_rel(self, H)
        if i == j:
            return True
        # a0 is the constant 1: 1 <= a_j iff a_j > 0 or a_j >= 0 is recorded.
        if i == 0 and j in H.zero_comparisons.keys():
            if H.zero_comparisons[j].comp in [GT, GE]:
                return True
            return False
        # signi, signj = H.sign(i), H.sign(j)
        wsigni, wsignj = H.weak_sign(i), H.weak_sign(j)
        if wsignj != 0:
            if wsigni == -1 and wsignj == 1:
                return True
            if wsigni == 1 and wsignj == -1:
                return False
            # both signs are the same.
            if (i, j) in H.term_comparisons.keys():
                comps = H.term_comparisons[i, j]
                for c in comps:
                    if (c.comp in [LE, LT] and ((wsignj == 1 and c.coeff <= 1) or
                        (wsignj == -1 and ((c.coeff < 0 or c.coeff >= 1))))):
                        return True
            return False
        # sign info on right is unknown
        if (i, j) in H.term_comparisons.keys():
            comps = H.term_comparisons[i, j]
            if (any((c.comp in [LT, LE] and c.coeff <= 1) for c in comps) and
                any((c.comp in [LT, LE] and (c.coeff < 0 or c.coeff >= 1)) for c in comps)):
                return True
        return False

    def ge_rel(self, other, H):
        """Return True if H proves a_i >= a_j (False means unknown)."""
        i, j = self.index, other.index
        if i > j:
            return other.le_rel(self, H)
        if i == j:
            return True
        # a0 is the constant 1: 1 >= a_j iff a_j < 0 or a_j <= 0 is recorded.
        if i == 0 and j in H.zero_comparisons.keys():
            if H.zero_comparisons[j].comp in [LT, LE]:
                return True
            return False
        # signi, signj = H.sign(i), H.sign(j)
        wsigni, wsignj = H.weak_sign(i), H.weak_sign(j)
        if wsignj != 0:
            if wsigni == -1 and wsignj == 1:
                return False
            if wsigni == 1 and wsignj == -1:
                return True
            # both signs are the same.
            if (i, j) in H.term_comparisons.keys():
                comps = H.term_comparisons[i, j]
                for c in comps:
                    if c.comp in [GT, GE] and ((wsignj == 1 and c.coeff >= 1) or
                        (wsignj == -1 and c.coeff <= 1)):
                        return True
            return False
        # sign info on right is unknown
        if (i, j) in H.term_comparisons.keys():
            comps = H.term_comparisons[i, j]
            if (any((c.comp in [GT, GE] and c.coeff >= 1) for c in comps) and
                any((c.comp in [GT, GE] and c.coeff <= 1) for c in comps)):
                return True
        return False

    def eq_rel(self, other, H):
        """Return True if H proves a_i == a_j."""
        i, j = self.index, other.index
        if i == j:
            return True
        # Equality holds when the difference is a known zero equation.
        if self -other in H.zero_equations or other - self in H.zero_equations:
            return True
        return False

    def neq_rel(self, other, H):
        """Return True if H proves a_i != a_j (via strict order either way)."""
        i, j = self.index, other.index
        if i > j:
            return other.neq_rel(self, H)
        if i == j:
            return False
        return self.gt_rel(other, H) or self.lt_rel(other, H)
# creates a name for every subterm in the list of terms args
# returns a list of all subterms (the ith name names the ith subterms)
# and dictionaries with all the name definitions
# creates a name for every subterm in the list of terms args
# returns a list of all subterms (the ith name names the ith subterms)
# and dictionaries with all the name definitions
def make_term_names(terms):
    """Assign an IVar name a0, a1, ... to every distinct subterm.

    Returns (subterm_list, name_defs) where subterm_list[i] is the
    subterm named a<i>, and name_defs[i] is that subterm with its own
    children replaced by their IVar names.  a0 always names `one`.
    """
    name_defs = {}
    subterm_list = [one]
    name_defs[0] = one
    # makes this term and all subterms have names, defining new names
    # if necessary; and returns the name
    #
    # notes that subterm_list and name_defs are global to this procedure,
    # which augments them as it recurses through t
    def process_subterm(t):
        if t in subterm_list:
            # Already named: reuse the existing index.
            return IVar(subterm_list.index(t))
        else:
            # Build the definition with children replaced by their names.
            new_def = None
            if isinstance(t, Var):
                new_def = t
            elif isinstance(t, Add_term):
                addpairs = []
                for a in t.addpairs:
                    addpairs.append(Add_pair(a.coeff, process_subterm(a.term)))
                new_def = Add_term(addpairs)
            elif isinstance(t, Mul_term):
                mulpairs = []
                for m in t.mulpairs:
                    mulpairs.append(Mul_pair(process_subterm(m.term), m.exp))
                new_def = Mul_term(mulpairs)
            elif isinstance(t, Func_term):
                args = []
                for m in t.args:
                    args.append(process_subterm(m))
                new_def = Func_term(t.name, args, t.const)
            l = len(subterm_list) # index of new term
            subterm_list.append(t)
            name_defs[l] = new_def
            return IVar(l)
    for t in terms:
        process_subterm(t)
    return subterm_list, name_defs
def test_make_term_names():
    """Manual smoke test: print the subterm naming for a few terms
    (t4 shares subterms with t1 and t2, exercising name reuse)."""
    x = Var("x")
    y = Var("y")
    z = Var("z")
    t1 = Mul_term([(Add_term([(2, x), (-3, y), (1, z)]), 3), (x, 2)])
    t2 = Mul_term([(Add_term([(2, x), (-3, y), (1, z)]), 3), (x, 3)])
    t3 = Mul_term([(x, 2), (Add_term([(-3, y), (1, z), (2, x)]), 3)])
    t4 = Add_term([(2, t1), (3, t2), (1, x)])
    terms = [t1, t2, t3, t4]
    subterm_list, name_defs = make_term_names(terms)
    print
    print "Terms:", terms
    print
    print "Subterms:"
    for i in range(len(subterm_list)):
        print IVar(i), "=", subterm_list[i]
    print
    print "Definitions:"
    for i in range(len(subterm_list)):
        print IVar(i), "=", name_defs[i]
|
UTF-8
|
Python
| false | false | 2,014 |
12,068,858,143,290 |
5699bf7ac623997813bfc3f1f2d41c570b2d0bc3
|
442a5d3ba496f75523883a438b6dc92c2a748c46
|
/swiftest/__init__.py
|
0fa8155b58040c9d485c6fe36661c4a53de6eea3
|
[
"MIT"
] |
permissive
|
smashwilson/swiftest
|
https://github.com/smashwilson/swiftest
|
cd8f06eba04e55658fa55f1c8fd632af112860e7
|
a8cd853d513dc6f9835db7c9229a1e857952304b
|
refs/heads/master
| 2020-04-07T10:02:47.869359 | 2013-09-02T17:56:47 | 2013-09-02T17:59:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Pythonic client for OpenStack Object Storage.
>>> from client import Client
>>> cli = Client(endpoint='https://identity.api.rackspacecloud.com/v1.0/', username=USER_NAME, auth_key=AUTH_KEY)
"""
# Public API of the top-level package.
__all__ = ['VERSION']
# Version string for Swiftest. Follows rational versioning in "major.minor.patch" format.
# MAJOR: Incremented when the exported public API is changed in a backwards-incompatible way.
# MINOR: Incremented when the public API has backwards-compatible changes.
# PATCH: Incremented for internal bugfixes.
VERSION='1.0.0'
# Import submodules so they are reachable as attributes of the package
# (e.g. swiftest.client) after `import swiftest`.
from . import account
from . import client
from . import exception
from . import metadata
|
UTF-8
|
Python
| false | false | 2,013 |
10,256,381,906,879 |
a4b20c335d60a8e437196f3134251a1ee6fc569b
|
a4b41805a1d789c6ea17cfed4f180ae94e86984f
|
/pelican_comment_system/pelican_comment_system.py
|
cee8ff535903a8873714a36f2a3ec3080832296a
|
[
"AGPL-3.0-only"
] |
non_permissive
|
drewbo/pelican-plugins
|
https://github.com/drewbo/pelican-plugins
|
54a27758d77e1feacd0c08bc61cf00175e2f5ac9
|
a6234488953c540fe7ff766d0720c4fa150c08cf
|
refs/heads/master
| 2021-01-14T12:34:54.522380 | 2014-11-09T00:00:53 | 2014-11-09T00:00:53 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Pelican Comment System
======================
A Pelican plugin, which allows you to add comments to your articles.
Author: Bernhard Scheirle
"""
import logging
import os
logger = logging.getLogger(__name__)
from itertools import chain
from pelican import signals
from pelican.utils import strftime
from pelican.readers import MarkdownReader
class Comment:
    """One article comment plus its (recursively nested) replies.

    id       -- identifier unique within the article (comment file basename)
    metadata -- reader metadata dict; must contain a comparable 'date'
    content  -- rendered comment body
    replies  -- direct child comments, threaded via addReply()
    """

    def __init__(self, id, metadata, content):
        self.id = id
        self.content = content
        self.metadata = metadata
        self.replies = []

    def addReply(self, comment):
        """Attach *comment* as a direct reply of this comment."""
        self.replies.append(comment)

    def getReply(self, id):
        """Depth-first search of the reply tree.

        Returns the Comment with the given id, or None if absent.
        """
        for reply in self.replies:
            if reply.id == id:
                return reply
            # Not this reply: search its own subtree.
            deepReply = reply.getReply(id)
            if deepReply is not None:  # idiom fix: was `!= None`
                return deepReply
        return None

    def __lt__(self, other):
        # Ordering (used by sorted()) is chronological by comment date.
        return self.metadata['date'] < other.metadata['date']

    def sortReplies(self):
        """Recursively sort every level of the reply tree by date."""
        for reply in self.replies:
            reply.sortReplies()
        self.replies = sorted(self.replies)

    def countReplies(self):
        """Total number of replies in this comment's subtree."""
        # Direct replies plus everything nested beneath them.
        return len(self.replies) + sum(r.countReplies() for r in self.replies)
def initialized(pelican):
    """Pelican `initialized` signal handler: install the plugin defaults.

    Fills in the two settings on the global DEFAULT_CONFIG and, when a
    Pelican instance is supplied, on its settings as well.  The plugin is
    disabled by default and reads comments from a 'comments' directory.
    """
    from pelican.settings import DEFAULT_CONFIG
    DEFAULT_CONFIG.setdefault('PELICAN_COMMENT_SYSTEM', False)
    # BUG FIX: a missing comma previously concatenated the two strings into
    # one argument ('PELICAN_COMMENT_SYSTEM_DIRcomments'), so setdefault set
    # that bogus key to None and the real directory default was never set.
    DEFAULT_CONFIG.setdefault('PELICAN_COMMENT_SYSTEM_DIR', 'comments')
    if pelican:
        pelican.settings.setdefault('PELICAN_COMMENT_SYSTEM', False)
        pelican.settings.setdefault('PELICAN_COMMENT_SYSTEM_DIR', 'comments')
def add_static_comments(gen, metadata):
    """Signal handler for `article_generator_context`: load an article's
    comments from disk into its metadata.

    Reads every Markdown file in PELICAN_COMMENT_SYSTEM_DIR/<slug>/,
    threads replies beneath their parents via each file's 'replyto'
    metadata, then stores the sorted top-level comments in
    metadata['comments'] and the total count (replies included) in
    metadata['comments_count'].
    """
    if gen.settings['PELICAN_COMMENT_SYSTEM'] != True:
        return
    # Defaults first, so templates can rely on both keys even when loading
    # bails out below.
    metadata['comments_count'] = 0
    metadata['comments'] = []
    if not 'slug' in metadata:
        logger.warning("pelican_comment_system: cant't locate comments files without slug tag in the article")
        return
    reader = MarkdownReader(gen.settings)
    comments = []
    replies = []
    folder = os.path.join(gen.settings['PELICAN_COMMENT_SYSTEM_DIR'], metadata['slug'])
    if not os.path.isdir(folder):
        logger.debug("No comments found for: " + metadata['slug'])
        return
    for file in os.listdir(folder):
        name, extension = os.path.splitext(file)
        # Only parse files whose extension the Markdown reader understands.
        if extension[1:].lower() in reader.file_extensions:
            content, meta = reader.read(folder + "/" + file)
            meta['locale_date'] = strftime(meta['date'], gen.settings['DEFAULT_DATE_FORMAT'])
            # The file's basename doubles as the comment id.
            com = Comment(name, meta, content)
            if 'replyto' in meta:
                replies.append( com )
            else:
                comments.append( com )
    #TODO: Fix this O(n²) loop
    # Attach each reply to its parent, which may itself be a reply.
    for reply in replies:
        for comment in chain(comments, replies):
            if comment.id == reply.metadata['replyto']:
                comment.addReply(reply)
    # Sort every thread by date and total up the nested reply counts.
    count = 0
    for comment in comments:
        comment.sortReplies()
        count += comment.countReplies()
    comments = sorted(comments)
    metadata['comments_count'] = len(comments) + count
    metadata['comments'] = comments
def register():
    """Plugin entry point: hook this plugin into Pelican's signals."""
    # Install defaults at startup; inject comments into each article's
    # template context during generation.
    signals.initialized.connect(initialized)
    signals.article_generator_context.connect(add_static_comments)
|
UTF-8
|
Python
| false | false | 2,014 |
13,151,189,899,348 |
2cf9e1cf87578b44c74fa05f68e2ac35a31576ef
|
7f7e07dbc0dd48e4030bd8e6ec600953a5bcea8a
|
/custom_field/admin.py
|
6fbfcef700521a11e20aee2a06ac104f031ec6d2
|
[] |
no_license
|
jjmarin/django-custom-field
|
https://github.com/jjmarin/django-custom-field
|
77f58d37afa7c46d4724ffd60c3227309a4fcdad
|
4fc927184df2d359f49587c98efb8079247907ca
|
refs/heads/master
| 2020-12-25T01:45:16.332030 | 2013-05-16T21:40:08 | 2013-05-16T21:40:08 | 12,484,636 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from models import *
class CustomFieldAdmin(admin.ModelAdmin):
    """Django admin configuration for the CustomField model."""
    # Show the owning content type alongside the field name in the list view.
    list_display = ('content_type','name')
    list_filter = ('content_type',)
    # Search by the related content type's name or the field's own name.
    search_fields = ('content_type__name','name')
admin.site.register(CustomField, CustomFieldAdmin)
|
UTF-8
|
Python
| false | false | 2,013 |
2,156,073,589,663 |
39ca20c076309a786e632677dac768ea8cdf8c08
|
69dbdbb135096ed2b1b74bbaa04c53a08b4d5d65
|
/r5d4/analytics_browser.py
|
3fe0df42eafcdf30b1cf7c9a92659bdad4ddf8a6
|
[
"MIT"
] |
permissive
|
wahello/r5d4
|
https://github.com/wahello/r5d4
|
7c9b835e4fb7e7dd43cf96a03ace3948e77b06b8
|
df9aa3a1aea98950890d250aba1423174fd123c4
|
refs/heads/master
| 2021-05-29T10:31:08.316318 | 2014-12-10T10:32:32 | 2014-12-10T10:32:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import absolute_import
from flask import abort
from werkzeug.exceptions import ServiceUnavailable
from r5d4.analytics import Analytics
from r5d4.flask_redis import get_conf_db, get_data_db
from r5d4.mapping_functions import DIMENSION_EXPANSION_MAP
from r5d4.utility import construct_key
def combinatorial_keys(rem_range):
    """Yield every combination of (dimension, value) pairs as one flat tuple.

    *rem_range* is a sequence of (dimension, values) pairs.  Each yielded
    tuple interleaves dimension names with one chosen value per dimension,
    e.g. [("d1", [1, 2]), ("d2", [3])] yields ('d1', 1, 'd2', 3) and
    ('d1', 2, 'd2', 3).  An empty *rem_range* yields the empty tuple once.
    """
    if not rem_range:
        yield ()
        return
    (first_dim, first_values), tail = rem_range[0], rem_range[1:]
    for chosen in first_values:
        prefix = (first_dim, chosen)
        # Recurse on the remaining dimensions and splice each suffix on.
        for suffix in combinatorial_keys(tail):
            yield prefix + suffix
def browse_analytics(a_name, slice_args):
    """Query one active analytics definition and return its measure rows.

    a_name     -- analytics name; must be in the Redis set Analytics:Active
    slice_args -- mapping of slice-dimension name -> raw value expression,
                  expanded per-dimension via DIMENSION_EXPANSION_MAP

    Returns {"status": "OK", "data": [row, ...]} where each row maps query
    dimensions and measure names to values.  Aborts with 404 for unknown
    analytics, 400 for bad/missing slice parameters, 503 when the stored
    definition fails to parse.
    """
    conf_db = get_conf_db()
    if not conf_db.sismember("Analytics:Active", a_name):
        abort(404)
    analytics_definition = conf_db.get("Analytics:ByName:%s" % a_name)
    if analytics_definition is None:
        abort(404)
    try:
        analytics = Analytics(analytics_definition)
    except (ValueError, AssertionError) as e:
        raise ServiceUnavailable(e.args)
    data_db = get_data_db(analytics["data_db"])
    mapping = analytics["mapping"]
    measures = analytics["measures"]
    query_dimensions = set(analytics["query_dimensions"])
    slice_dimensions = set(analytics["slice_dimensions"])
    # Expand each slice argument into its set of concrete values.
    d_range = []
    for d in slice_dimensions:
        expand = DIMENSION_EXPANSION_MAP[mapping[d]["type"]]
        try:
            value_set = expand(slice_args[d])
            d_range.append((d, value_set))
        except ValueError as e:
            abort(400, e.args)
        except KeyError as e:
            abort(400, ("Missing slice parameter", str(e.args[0])))
    d_range_dict = dict(d_range)
    def get_range(dimensions):
        # Sorted (dimension, sorted-values) pairs, for deterministic key order.
        d_range = map(lambda d: (d, sorted(list(d_range_dict[d]))),
                      sorted(list(dimensions)))
        return d_range
    # qnos: queried but not sliced; snoq: sliced but not queried.
    qnos_dimensions = query_dimensions - slice_dimensions
    snoq_dimensions = slice_dimensions - query_dimensions
    s_range = get_range(slice_dimensions)
    snoq_range = get_range(snoq_dimensions)
    # For unsliced query dimensions, discover the value range from the
    # RefCount hashes stored alongside the data.
    for qnos in qnos_dimensions:
        d_range_dict[qnos] = set()
        for s_key in combinatorial_keys(s_range):
            refcount_key_str = construct_key('RefCount', s_key, qnos)
            d_range_dict[qnos] |= set(data_db.hkeys(refcount_key_str))
    q_range = get_range(query_dimensions)
    output = []
    for q_key in combinatorial_keys(q_range):
        row = {}
        # Unflatten the key tuple back into dimension->value entries.
        key_is_set = False
        key = None
        for q in q_key:  # q_key=(Date,20110808,Practice,1)
            if not key_is_set:
                key = q
                key_is_set = True
            else:
                row[key] = q
                key_is_set = False
        for measure in measures:
            # Measures whose type name ends in "float" are parsed as floats.
            if mapping[measure]["type"][-5:] == "float":
                is_float = True
            else:
                is_float = False
            row[measure] = 0
            snoq_keys = list(combinatorial_keys(snoq_range))
            if len(snoq_keys) < 2:
                # Single (or no) extra slice key: read the value directly.
                if len(snoq_keys) == 1:
                    snoq_key = snoq_keys[0]
                else:
                    snoq_key = None
                val_key = construct_key(measure, q_key, snoq_key)
                if mapping[measure]["type"] == "unique":
                    # "unique" measures are stored as sets; report cardinality.
                    val = data_db.scard(val_key)
                else:
                    val = data_db.get(val_key)
                if val:
                    if is_float:
                        row[measure] = float(val)
                    else:
                        row[measure] = int(val)
            else:
                # Several slice keys map onto one output row: sum them up.
                for snoq_key in snoq_keys:
                    val_key = construct_key(measure, q_key, snoq_key)
                    if mapping[measure]["type"] == "unique":
                        # Set cardinalities cannot be added without double
                        # counting, so aggregation is rejected outright.
                        abort(400, (
                            "Measure type 'unique' cannot be aggregated"))
                    else:
                        val = data_db.get(val_key)
                        if val:
                            if is_float:
                                row[measure] += float(val)
                            else:
                                row[measure] += int(val)
        output.append(row)
    output_response = {
        "status": "OK",
        "data": output
    }
    return output_response
|
UTF-8
|
Python
| false | false | 2,014 |
3,332,894,660,300 |
6b971952d145b9ce7d39905cf697b19e60419832
|
432d77eeb6f039b932ab25cfe3fa1b3339e6cae0
|
/ex41.py
|
1578627a676e70b311ad41ba0540623df3934263
|
[] |
no_license
|
ashleymcnamara/LearnPythonTheHardWay
|
https://github.com/ashleymcnamara/LearnPythonTheHardWay
|
1c2b6f636c842ea7c4c0120cf844a687cfe0fa99
|
1dccd84fa33b5bb886d0ea33d619471bcb172de7
|
refs/heads/master
| 2020-04-22T20:03:16.056201 | 2014-01-30T00:24:03 | 2014-01-30T00:24:03 | 16,090,529 | 7 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import random
from urllib import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function names *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, and call it with parameters self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# Do they want to drill phrases first?
# Line 30 Sets variable "PHRASE_FIRST" to False or under certian circumstances True
# Line 31 Meaning you have to have passed at least two arguments to the program (cont)
# (the first being the program name itself), and said second argument has to be "english"
# Line 32 Resets "PHRASE_FIRST" to be true IF len(sys.argv) == 2 and sys.argv[1] == "english"
# Sys.argv is the command line arguments, starting with the name of the program, i.e. exercise_41.py
# English becomes ['exercise_41.py', 'english'], and said second argument (sys.argv[1]) has to be "english"
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
# Load up the words from the website
# For word in URLOPEN(word_url).readlines():
# Use a for loop and a local copy of the word list
# For word in open 'words.txt', read
# Words.append [word.strip()
# Using list comprehensions.
WORDS = [word.strip() for word in
open('words.txt', 'r')]
# Line 47, define convert and give it snippet and phrase paramaters
# Lines 48 - 52, Return a list of random words for each "%%%" pattern in snippet string
# Capitalize each word in the list
# Return the list
def convert(snippet, phrase):
    """Fill the %%%, *** and @@@ placeholders with random words.

    Returns a two-element list: the filled-in *snippet* followed by the
    filled-in *phrase*, both substituted with the same random names so the
    question/answer pair stays consistent.
    """
    # Capitalized random class names, one per %%% marker in the snippet.
    fake_classes = [word.capitalize()
                    for word in random.sample(WORDS, snippet.count("%%%"))]
    # Plain random names, one per *** marker.
    fake_names = random.sample(WORDS, snippet.count("***"))
    # A comma-separated random parameter list for each @@@ marker.
    fake_params = []
    for _ in range(snippet.count("@@@")):
        how_many = random.randint(1, 3)
        fake_params.append(', '.join(random.sample(WORDS, how_many)))

    filled = []
    for template in (snippet, phrase):
        text = template[:]
        # Substitute markers left to right, one replacement per word.
        for marker, words in (("%%%", fake_classes),
                              ("***", fake_names),
                              ("@@@", fake_params)):
            for word in words:
                text = text.replace(marker, word, 1)
        filled.append(text)
    return filled
# keep going until they hit CTRL-D
try:
while True:
snippets = PHRASES.keys()
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
UTF-8
|
Python
| false | false | 2,014 |
12,781,822,685,787 |
a714f8577df962d5784e1ed9454e8bdb7692779d
|
f061602595a78bdbdbf32e2dfdcfe623db5b8efd
|
/graph/management/commands/week_report2.py
|
3b5f7682469f90fd6b25cf6d71b44199b4fd5748
|
[] |
no_license
|
NorbertMichalski/utilities
|
https://github.com/NorbertMichalski/utilities
|
b9e0643d4b8e0097e0c774d63adbeaa66d3da06b
|
da27a23add9c42d62ae21a5e74eef920bbd3d839
|
refs/heads/master
| 2020-05-14T19:04:23.262384 | 2014-01-27T13:45:28 | 2014-01-27T13:45:28 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import EmailMessage
from graph.models import OverviewStat, OverviewGraph
from prices.models import Brand, Result
import datetime
import csv
def send_email(file_attached):
    """Email the weekly statistics report with *file_attached* attached.

    Sender and recipient addresses are hard-coded; EmailMessage.send is
    called with fail_silently=False, so delivery problems raise.
    """
    subject = "weekly report"
    body = "Attached is the weekly statistics report."
    email = EmailMessage(subject, body, 'reports@shopmro.com',
            ['stats.infographics@gmail.com', 'mmenashe@mechdrives.com' ])
    email.attach_file(file_attached)
    email.send(fail_silently=False)
def weekly_stats(graph_pk):
    """Aggregate the oldest recorded ISO week of OverviewStat rows for a graph.

    Walks the graph's stats in date order and sums them until the ISO week
    number changes.  Returns (total_visits, total_sales, total_money,
    average_rank); all zeros when the graph has no stats at all.
    """
    stats = OverviewStat.objects.filter(graph=graph_pk).order_by('date')
    # Robustness fix: the original indexed stats[0] unconditionally and
    # crashed with IndexError on an empty queryset.
    if not stats:
        return 0, 0, 0, 0
    current_week = stats[0].date.isocalendar()[1]
    weekly_money, weekly_rank, weekly_visits, weekly_sales = 0, 0, 0, 0
    days = 0
    for stat in stats:
        if stat.date.isocalendar()[1] != current_week:
            break
        # Round money to cents, matching the original report formatting.
        weekly_money += float('%.2f' % stat.get_money())
        weekly_rank += stat.get_rank()
        weekly_visits += stat.get_visits()
        weekly_sales += stat.get_sales()
        days += 1
    # BUG FIX: the counter previously started at 1 and was bumped once per
    # stat, so the rank average divided by (n + 1) and came out biased low.
    # (The old weekly_price accumulator was computed but never used/returned
    # and has been dropped.)
    avg_rank = int(weekly_rank / days) if days else 0
    return weekly_visits, weekly_sales, weekly_money, avg_rank
class Command(BaseCommand):
    """`week_report2` management command: build weekly_report.csv and mail it.

    Writes a per-brand row of weekly visit/sale/price-change statistics to a
    hard-coded CSV path, appends totals, then emails the file.
    """
    def handle(self, *args, **options):
        # override last week's report (truncate and write the header row)
        f = open('/home5/shopmroc/utilities/reports/weekly_report.csv', 'wb')
        writer = csv.writer(f, dialect='excel')
        data = ['Brand', 'Total visits', 'Average visits', 'Total sales', 'Average sales',
                'Prices changed', 'Number prices changes', 'Average rank' ]
        writer.writerow(data)
        f.close()
        # count the changes of prices, per brand name
        brands = Brand.objects.all()
        counter = {'All':0}
        for brand in brands:
            counter[brand.name] = 0
        all_results = Result.objects.all().prefetch_related('product')
        for result in all_results:
            if result.changed:
                brand_name = result.product.brand.name
                counter[brand_name] += 1
        print counter
        # NOTE(review): sales/visits lists and rank_changed are never used
        # afterwards — candidates for removal.
        sales, visits = [], []
        prices_changed, rank_changed = False, False
        # if not monday get last moday's report (rewind `today` to Monday)
        today = datetime.date.today()
        week_day = today.weekday()
        if week_day:
            today = today - datetime.timedelta(days=week_day)
        print today
        # get all brands
        # NOTE(review): despite the comment above, only the first five graphs
        # are taken — confirm the [:5] slice is intentional.
        graphs = OverviewGraph.objects.all()[:5]
        f = open('/home5/shopmroc/utilities/reports/weekly_report.csv', 'ab')
        writer = csv.writer(f, dialect='excel')
        for graph in graphs:
            # Skip graphs whose brand has no price-change counter.
            try:
                counter[graph.brand]
            except KeyError:
                continue
            if counter[graph.brand]:
                prices_changed = 'Yes'
            else:
                prices_changed = 'No'
            # get the week statistics
            weekly_visits, weekly_sales, weekly_money, weekly_rank = weekly_stats(graph.pk)
            data = [graph.brand, weekly_visits, weekly_visits/7, weekly_sales,
                    weekly_sales/7, prices_changed, counter[graph.brand], weekly_rank,
                    ]
            if graph.brand == 'All':
                total_sales = weekly_money
                data = ['All brands', weekly_visits, weekly_visits/7, weekly_sales,
                        weekly_sales/7, '', '', weekly_rank,]
            writer.writerow(data)
        writer.writerow([])
        # NOTE(review): total_sales is only bound when an 'All' graph appears
        # among the graphs iterated above; NameError otherwise — confirm.
        data = ['Total profit', total_sales]
        writer.writerow(data)
        data = ['Average day profit', '%.2f' %(total_sales/7,)]
        writer.writerow(data)
        f.close()
        send_email('/home5/shopmroc/utilities/reports/weekly_report.csv')
|
UTF-8
|
Python
| false | false | 2,014 |
13,408,887,930,190 |
0cc4380cba2ba35dce3ee0049df97e1e689ad955
|
9e496c19641c695e6ed404892b76e7c72e8dc4b1
|
/good_acc/acceptance/helper_functions.py
|
2f2b2bd2a3f06cbb5af957cf1da53293ae7fd21c
|
[] |
no_license
|
wolet/zTools
|
https://github.com/wolet/zTools
|
193bfbdfab2d0cc8ae5883dda22fd1ac6900c70c
|
4d2508fa0e030cf8eb8b8f5146be7dce973ef351
|
refs/heads/master
| 2021-01-22T02:04:38.971153 | 2013-05-02T14:48:59 | 2013-05-02T14:48:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import datetime
import random
from string import split
from django.db.models.query_utils import Q
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.template.defaultfilters import slugify
import xlwt
def render_response(req, *args, **kwargs):
    """Shortcut around render_to_response that always injects a RequestContext
    built from *req*; remaining arguments pass straight through."""
    context = RequestContext(req)
    kwargs['context_instance'] = context
    return render_to_response(*args, **kwargs)
|
UTF-8
|
Python
| false | false | 2,013 |
6,305,011,991,070 |
a4775b21279560a1369769baf1a8da8f90ae1dc4
|
a2719a49ae165433f905182918265b31d0bfff98
|
/doc/install/Linux/Niflheim/el6-dl160g6-tm-intel-2013.1.117-openmpi-1.6.3-mkl-2013.1.117-sl-hdf5-1.8.10.py
|
f326d27b2e1a7f1e5952b87b030295393bd516ab
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
non_permissive
|
robwarm/gpaw-symm
|
https://github.com/robwarm/gpaw-symm
|
6c3128db17f430e8f76a924b2e5a1e3986ba254d
|
2848c068496c30e77829a16bbf3edeba6a1b4cf3
|
refs/heads/master
| 2021-01-01T17:15:58.705208 | 2014-08-16T20:13:54 | 2014-08-16T20:13:54 | 18,296,739 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
nodetype = 'dl160g6'
scalapack = True
compiler = 'icc'
libraries =[
'mkl_def',
'mkl_scalapack_lp64', 'mkl_intel_lp64', 'mkl_sequential',
'mkl_core', 'mkl_blacs_openmpi_lp64',
'hdf5',
'xc',
'mpi',
]
library_dirs =[
'/home/opt/common/intel-compilers-2013.1.117/compiler/lib/intel64',
'/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-intel-2013.1.117-1/lib',
'/home/opt/common/intel-mkl-2013.1.117/mkl/lib/intel64',
'/home/opt/common/intel-mkl-2013.1.117/compiler/lib/intel64',
'/home/opt/el6/' + nodetype + '/hdf5-1.8.10-' + nodetype + '-tm-intel-2013.1.117-openmpi-1.6.3-1/lib',
'/home/opt/el6/' + nodetype + '/libxc-2.0.1-' + nodetype + '-intel-2013.1.117-1/lib',
]
include_dirs +=[
'/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-intel-2013.1.117-1/include',
'/home/opt/el6/' + nodetype + '/hdf5-1.8.10-' + nodetype + '-tm-intel-2013.1.117-openmpi-1.6.3-1/include',
'/home/opt/el6/' + nodetype + '/libxc-2.0.1-' + nodetype + '-intel-2013.1.117-1/include',
]
extra_link_args =[
'-Wl,-rpath=/home/opt/common/intel-compilers-2013.1.117/compiler/lib/intel64'
',-rpath=/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-intel-2013.1.117-1/lib'
',-rpath=/home/opt/common/intel-mkl-2013.1.117/mkl/lib/intel64'
',-rpath=/home/opt/common/intel-mkl-2013.1.117/compiler/lib/intel64'
',-rpath=/home/opt/el6/' + nodetype + '/hdf5-1.8.10-' + nodetype + '-tm-intel-2013.1.117-openmpi-1.6.3-1/lib'
',-rpath=/home/opt/el6/' + nodetype + '/libxc-2.0.1-' + nodetype + '-intel-2013.1.117-1/lib'
]
extra_compile_args =['-xHOST', '-O3', '-ipo', '-std=c99', '-fPIC', '-Wall']
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
mpicompiler = '/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-intel-2013.1.117-1/bin/mpicc'
mpilinker = mpicompiler
platform_id = nodetype
hdf5 = True
|
UTF-8
|
Python
| false | false | 2,014 |
10,849,087,390,650 |
c8ffb7760cfc2f07e5ffd7f76373478357319411
|
12c4ab2b18365933be93c3eeb02e8416f9efe043
|
/script/gbt.py
|
be91923e91e249f2f263883e282f2f281d39e0ae
|
[] |
no_license
|
bluekingsong/simple-gbdt
|
https://github.com/bluekingsong/simple-gbdt
|
ca00a3b83e95962af926ba4381606347ddc0bba9
|
1b6ace901588e2674b8604b9171414c77a96486b
|
refs/heads/master
| 2021-01-13T04:44:52.329558 | 2013-04-10T12:30:58 | 2013-04-10T12:30:58 | 9,343,914 | 44 | 46 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from data import DataSet;
from tree import construct_decision_tree;
from model import Model;
from random import uniform,sample;
from sys import argv;
def main(data_filename,stat_filename,max_iter,sample_rate,learn_rate,max_depth,split_points):
    """Train a gradient-boosted decision tree model and log per-iteration stats.

    Loads the CSV dataset, randomly takes 2/3 of the instances for training
    and the remaining 1/3 for testing, then trains for max_iter rounds,
    writing a tab-separated loss/accuracy row per iteration to stat_filename.
    """
    dataset=DataSet(data_filename);
    print "Model parameters configuration:[data_file=%s,stat_file=%s,max_iter=%d,sample_rate=%f,learn_rate=%f,max_depth=%d,split_points=%d]"%(data_filename,stat_filename,max_iter,sample_rate,learn_rate,max_depth,split_points);
    dataset.describe();
    stat_file=open(stat_filename,"w");
    stat_file.write("iteration\taverage loss in train data\tprediction accuracy on test data\taverage loss in test data\n");
    model=Model(max_iter,sample_rate,learn_rate,max_depth,split_points);
    #train_data=dataset.get_instances_idset();
    #test_data=train_data;
    # Random 2/3 train split; everything else is held out for testing.
    train_data=sample(dataset.get_instances_idset(),int(dataset.size()*2.0/3.0));
    test_data=set(dataset.get_instances_idset())-set(train_data);
    model.train(dataset,train_data,stat_file,test_data);
    #model.test(dataset,test_data);
    stat_file.close();
if __name__=="__main__":
input_filename="data/adult_part.csv";
input_filename="data/adult.data.csv";
if len(argv)!=8:
print "usage:",argv[0],"data_filename stat_filename max_iter sample_rate learn_rate max_depth split_points";
print "for example:",argv[0],"data/adult.data.csv output/adult.data.stat 50 0.4 0.1 1 -1";
print "#"*60;
print "data_filename: the csv datafile used to train and test( random split 1/3 of data as test part";
print "stat_filename: the file to hold ouput information about prediction accuracy and loss value in each iteration";
print "max_iter: set the iterations in gradient boost algorithm";
print "sample_rate: subsample rate of train data to construct a single decision tree";
print "learn_rate: the learn rate parameter of gradient boost algorithm";
print "max_depth: the maximum depth of a decision tree, max_depth=1 means a decision stump with depth=1";
print "split_points: if a feature is numeric and has many distinct values, it's very slow to find a optimal split point.i use just $split_points$ values to find optimal split point of tree. 0 and negative $split_points$ means do not use the trick";
print "#"*60;
else:
input_filename=argv[1];
stat_filename=argv[2];
max_iter=int(argv[3]);
sample_rate=float(argv[4]);
learn_rate=float(argv[5]);
max_depth=int(argv[6]);
split_points=int(argv[7]);
main(input_filename,stat_filename,max_iter,sample_rate,learn_rate,max_depth,split_points);
|
UTF-8
|
Python
| false | false | 2,013 |
5,600,637,366,832 |
a7d4235a629fc2581062efc2194be562538c7773
|
63386f1e8a8e687fe475d0a348ca2178c433a570
|
/display.py
|
ccdb41a43cbeb7da260c9d16f328d8d1e4f54a85
|
[] |
no_license
|
gestapolur/nyan-233-crawler
|
https://github.com/gestapolur/nyan-233-crawler
|
9cdd3e6967908cfc4c76cda88280ca8e44205906
|
b8a0a5f0e0ab2ec061a9b7383e25a9116aee92e3
|
refs/heads/master
| 2020-04-27T03:06:10.903052 | 2013-06-21T15:26:06 | 2013-06-21T15:26:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
#
# Copyright 2013 Gestalt Lur.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import jinja2
import os
#from dbmodels import zh_user, temp_user, dead_zh_user, Count
from dbmodels import Count
from google.appengine.ext import db
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class MainHandler(webapp2.RequestHandler):
    """Front page: render index.html with the crawler's user counters."""
    def get(self):
        # Single Count entity holds the aggregate statistics.
        # NOTE(review): assumes a Count entity exists; `count` is None and
        # the attribute access crashes otherwise — confirm seeding.
        count = Count.all().get()
        template_values = {
            'zh_users_active': count.user_active,
            'zh_users_dead': count.user_dead,
            'count_start_date': count.count_start_date,
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([('/', MainHandler)], debug=True)
|
UTF-8
|
Python
| false | false | 2,013 |
1,580,547,980,985 |
8076b61ef93686fcb13b3b02e915720d9b87dc33
|
52fdec3a921fd945a71fd0712d87c9006a4b3417
|
/server.py
|
97ab3f198a6d6a380934e8756311c8ec381ce991
|
[] |
no_license
|
ohmy207/t_m_blog
|
https://github.com/ohmy207/t_m_blog
|
89410843b3c6da418fe1a306f07e8eb2e3e548fd
|
a7537eb91461d353d56e3b81980ea33df4e5705f
|
refs/heads/master
| 2016-09-09T21:48:47.699004 | 2014-06-12T11:49:55 | 2014-06-12T11:49:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import tornado.ioloop
import sys
import config
from application import Application
def main():
    """Create the Tornado application, bind config.PORT, and serve forever."""
    application = Application()
    application.listen(config.PORT)
    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 2,014 |
6,717,328,867,850 |
10e57cf4c5de06f56e7325f6c64b71a60c2cf09e
|
395c4baf360cdedc5f1b5665f35c8d82479daaec
|
/InClass/InClass05/InClass5_2.py
|
ba780350d905416e5ba5e492ab28cf17039b84e0
|
[] |
no_license
|
rconnors/CMDA
|
https://github.com/rconnors/CMDA
|
36239d9ca4d06e50f355ec6b6c054c11ee5b1fa4
|
d15e17b659a75cfb158235e3543f80fd99be1ede
|
refs/heads/master
| 2016-09-02T21:11:01.956431 | 2014-12-09T01:15:01 | 2014-12-09T01:15:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# 1) Create a new ipython notebook “Inclass5_2”
# import numpy with shorthand 'np'
import numpy as np
# 2) Create two one-dimensional array with 5 elements of your choice. Display array’s shape and type.
# data1, a list of 5 strings
d1 = ['0.0', '1.0', '4.0' ,'9.0', '16.0']
# data2, a list of 5 ints
d2 = [0, 1, 2, 3, 4]
# build arrays from data lists
a1 = np.array(d1)
a2 = np.array(d2)
# Display a1's shape and type.
print 'a1-shape: ', a1.shape
print 'a1-type: ', a1.dtype
# Display a2's shape and type.
print 'a2-shape: ', a2.shape
print 'a2-type: ', a2.dtype
# 3) Do element-wise summation for the two arrays.
np.add(a1.astype(np.float), a2)
# 4) Do element-wise product for the two arrays.
a1.astype(np.float) * a2
# 5) Create a 6X6 identity matrix.
ident = np.eye(6,6)
# 6) Replace all element on third row with value 5.
ident[2,] = 5
# 7) Replace all elements that are not zero with value 6 using a boolean indexing and slicing.
ident[ident!=0] = 6
# 8) Create an empty 3 dimensional array, arr3 with shape (2,3,4), and elements of integer type.
arr3 = np.empty((2,3,4))
# 9) Display its number of dimensions, shape and type of each element.
print 'arr3 dimensions:', arr3.ndim
print 'arr3 shape:', arr3.shape
print 'arr3 data type:', arr3.dtype
# 10) Give the second element on the third dimension, from the second group on the second dimension,
# from the first group on the first dimension the value 5.
arr3[0,1,1] = 5
# 11) Generate an array of 20 uniformly distributed random numbers with values between 0 and 1.
randy = rand(20)
# In[45]:
# 12) Get the min, max, sum, mean, and standard deviation of the array in part 11
print 'min:', np.min(randy)
print 'max:', np.max(randy)
print 'standard deviation: ', np.std(randy)
# 13) Replace all elements less than 0.5 with 0 and all elements larger than 0.5 with 1 in the array from
# part 11 using “where” function.
roundedRandy = np.where(randy > 0.5, 1, 0)
# 14) Sort the array in part 11
np.sort(randy)
# 15) Find the unique values in the same array.
unique(randy)
|
UTF-8
|
Python
| false | false | 2,014 |
14,843,406,996,026 |
3fca15bdeb829b5a368327323ebb176ff86b731a
|
a2ac73af04a07bb070cd85c88778608b561dd3e4
|
/exceptions.py
|
b4bf46927e4fa3a8f631a4aa399b3e8baec074f7
|
[] |
no_license
|
sannareddy/openerp-heimai
|
https://github.com/sannareddy/openerp-heimai
|
c849586d6099cc7548dec8b3f1cc7ba8be49594a
|
58255ecbcea7bf9780948287cf4551ed6494832a
|
refs/heads/master
| 2021-01-15T21:34:46.162550 | 2014-05-13T09:20:37 | 2014-05-13T09:20:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
/usr/share/pyshared/openerp/exceptions.py
|
UTF-8
|
Python
| false | false | 2,014 |
6,090,263,667,694 |
f1586c8e79366655087957a94de0d43e9dbc19d7
|
c91820461eb9a1b01e829b234c4840fd78bb779f
|
/check_systemd_service
|
f232c5da4ceff03867adad064e726143150378e3
|
[
"GPL-3.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
tomas-edwardsson/check_systemd_service
|
https://github.com/tomas-edwardsson/check_systemd_service
|
8c1298da79796fac8d61ccf9ef86d0029c210c9a
|
057f02e388daf8d5afbe3de1dcd9af7d99189ff5
|
refs/heads/master
| 2016-08-06T13:51:22.579757 | 2013-12-20T00:42:09 | 2013-12-20T00:42:09 | 15,297,669 | 0 | 2 | null | false | 2015-10-20T03:32:36 | 2013-12-19T00:18:06 | 2013-12-20T00:42:09 | 2015-10-20T03:32:36 | 84 | 1 | 2 | 1 |
Python
| null | null |
#!/usr/bin/python
# Copyright 2013, Tomas Edwardsson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
from pynag.Plugins import PluginHelper
def main():
    """Check the systemd unit named in sys.argv[1] and exit Nagios-style.

    Exits UNKNOWN (3) when the unit is not loaded; otherwise maps the
    'Active' header onto OK/CRITICAL/UNKNOWN via PluginHelper.
    """
    p = PluginHelper()
    # Warn on inactive
    # NOTE(review): level 2 is CRITICAL in Nagios terms even though the
    # comment above says warn — confirm the intended severity.
    level = 2
    service_status = get_service_status(sys.argv[1])
    if loaded(service_status)[0] is False:
        # Unit not loaded at all: report UNKNOWN with the raw output attached.
        p.exit(3,
               "%s - %s" % ( service_status['name'],
                            loaded(service_status)[1]),
               "\n" + service_status['unparsed'])
    active = service_status['headers']['Active'][0]
    if active.startswith("inactive") or active.startswith('failed'):
        p.add_status(level)
    elif active.startswith("active"):
        p.add_status(0)
    else:
        # Unrecognized state string -> UNKNOWN.
        p.add_status(3)
    p.add_summary("%s - %s" % ( service_status['name'], active))
    p.add_long_output("\n" + service_status['unparsed'])
    p.exit()
def loaded(stat):
    """Return (ok, detail) for the unit's 'Loaded' status header.

    *stat* is the dict produced by get_service_status().  ok is False when
    the header is missing/empty or reports an error; detail is the raw
    header value (None when the header is absent).
    """
    loaded_lines = stat.get('headers', {}).get('Loaded')
    if not loaded_lines:
        # BUG FIX: the original raised KeyError when the header was missing
        # and implicitly returned None when the list was empty — either way
        # the caller's loaded(...)[0] indexing then crashed.
        return (False, None)
    detail = loaded_lines[0]
    return (not detail.startswith("error"), detail)
def get_service_status(service):
    """Run `systemctl status <service>` and parse its output.

    Returns a dict with:
      name     -- first token of the first output line (the unit name)
      headers  -- dict mapping header names such as 'Loaded'/'Active' to
                  lists of their values (repeated headers accumulate)
      journal  -- the remaining lines after the header block
      unparsed -- the complete raw output
    """
    stdout = Popen(["systemctl", "status", service], stdout=PIPE).communicate()[0]
    stdout_lines = stdout.split("\n")
    # First line looks like "unit.service - description"; keep the unit name.
    name = stdout_lines.pop(0).split()[0]
    headers = {}
    while len(stdout_lines):
        l = stdout_lines.pop(0).strip()
        if l == "":
            # A blank line separates the header block from the journal tail.
            break
        k, v = l.split(': ', 1)
        if k in headers:
            headers[k].append(v)
        else:
            headers[k] = [v]
    return { "name": name, "headers": headers, "journal": stdout_lines, "unparsed": stdout }
if __name__ == "__main__":
main()
# vim: sts=4 expandtab
|
UTF-8
|
Python
| false | false | 2,013 |
13,443,247,664,275 |
2f25310fc64ca4ccac1225db9cef78147e725da1
|
2a5b56b64a78f22113a70cc97efbd0919fe93c3d
|
/linkdrop/tests/lib/test_metrics.py
|
0823fe09b869dfdebd9a7bfc853a7b6eb79a4326
|
[
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.1"
] |
non_permissive
|
mozilla/f1
|
https://github.com/mozilla/f1
|
aa68ae82eb2cf90a129249a090d2070379c88a60
|
de2850d75492ed7b99b0d06c2549eb82842fba4c
|
refs/heads/develop
| 2023-07-03T16:19:30.755625 | 2011-05-05T18:20:22 | 2011-05-05T18:20:22 | 1,048,692 | 35 | 16 |
NOASSERTION
| false | 2022-01-19T14:27:35 | 2010-11-03T18:22:44 | 2019-07-15T10:05:31 | 2019-03-29T04:42:10 | 13,339 | 50 | 10 | 4 |
Python
| false | false |
from linkdrop.lib import metrics
from linkdrop.tests import TestController
from mock import Mock
from nose import tools
class TestMetricsConsumer(TestController):
    """MetricsConsumer is an abstract base: consume() must be overridden."""
    @tools.raises(NotImplementedError)
    def test_consume_raises_notimplemented(self):
        # The base class's consume() is a stub that always raises.
        mc = metrics.MetricsConsumer()
        mc.consume('somedata')
class TestMetricsCollector(TestController):
    """Tests for MetricsCollector's attribute extraction and tracking."""

    def setUp(self):
        self.consumer = Mock()
        self.collector = metrics.MetricsCollector(self.consumer)

    def test_get_distinct_attr(self):
        # None yields an empty dict, dicts pass through unchanged,
        # any other type raises NotImplementedError.
        res = self.collector._get_distinct_attrs(None)
        tools.eq_(res, dict())
        distinct_ob = dict(foo='bar', baz='bawlp')
        res = self.collector._get_distinct_attrs(distinct_ob)
        tools.eq_(res, distinct_ob)
        tools.assert_raises(NotImplementedError,
                            self.collector._get_distinct_attrs,
                            list())

    def test_track_not_enabled(self):
        # A disabled collector must not forward anything to the consumer.
        self.collector.enabled = False
        distinct_ob = dict(foo='bar', baz='bawlp')
        self.collector.track(distinct_ob, 'id')
        # Fixed: assert_not_called() does not exist on the mock library
        # this file targets -- calling an unknown attribute on a Mock
        # silently succeeds, so the old assertion could never fail.
        # Check the call count explicitly instead.
        tools.eq_(self.consumer.consume.call_count, 0)

    def test_track(self):
        distinct_ob = dict(foo='bar', baz='bawlp')
        self.collector.track(distinct_ob, 'id', hey='now')
        # Fixed: assert_called_once() is likewise a no-op Mock attribute
        # call on old mock versions; assert on call_count instead.
        tools.eq_(self.consumer.consume.call_count, 1)
        data = self.consumer.consume.call_args[0][0]
        # The tracked payload carries a 'when' timestamp plus the
        # distinct attributes, the id and any extra keyword data.
        tools.ok_(data.pop('when', False))
        distinct_ob.update(dict(id='id', hey='now'))
        tools.eq_(data, distinct_ob)
|
UTF-8
|
Python
| false | false | 2,011 |
1,889,785,621,586 |
b240f01c1325b2c5bcbab7256efb8cbcb64e963e
|
bf5c467edbca274ebfc37a9a1bd6555c37dcf7fb
|
/xkcd.py
|
afbf781c72e52b4feb990e4b651cdbf027cd6881
|
[] |
no_license
|
mvd7793/XKCD-Hash-Generator
|
https://github.com/mvd7793/XKCD-Hash-Generator
|
a086821f3011fa61125a6ce9f0ea062cfd1f95d8
|
432ef0fd604a90739399224c85816224a81e1771
|
refs/heads/master
| 2016-09-05T21:03:15.447558 | 2013-04-01T22:52:55 | 2013-04-01T22:52:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib.request, urllib.parse, urllib.error
import random
from skein import skein1024
import os
diff_table = {}
def main():
    """Search for a word whose Skein-1024 hash is close to the xkcd target.

    Builds a 16x16 nibble bit-difference table into the module-global
    ``diff_table``, then walks a hash chain (each candidate is the hash
    of the previous one), reporting and submitting any word that beats
    the best Hamming distance seen so far.  Runs forever.
    """
    min_bits = 405
    # Target hash published by the xkcd "alma mater" hashing contest.
    xkcd_hash = "5b4da95f5fa08280fc9879df44f418c8f9f12ba424b7757de02bbdfbae0d4c4fdf9317c80cc5fe04c6429073466cf29706b8c25999ddd2f6540d4475cc977b87f4757be023f19b8f4035d7722886b78869826de916a79cf9c94cc79cd4347d24b567aa3e2390a573a373a48a5e676640c79cc70197e1c5e7f902fb53ca1858b6"
    # Generate diff table: diff_table[a][b] = popcount(a ^ b), i.e. the
    # number of differing bits between two hex nibbles.
    for a in range(16):
        diff_table[a] = {}
        for b in range(16):
            c = a ^ b
            diff = 0
            while c != 0:
                if c % 2 == 1:
                    diff += 1
                c = c >> 1
            diff_table[a][b] = diff
    # Seed with a per-process random value so parallel runs explore
    # different chains.
    word = str(os.getpid() + random.randint(0, 100000))
    word = hashWord(word)
    count = 0
    while True:
        count += 1
        if count % 100000 == 0:
            print(count)
        word_hash = hashWord(word)
        diff = hash_diff(word_hash, xkcd_hash)
        if (diff < min_bits):
            min_bits = diff
            print("Found a new word \"" + word + "\" that only differs by " + str(diff) + " bits")
            sendWord(word)
        # Next candidate is the hash of the current word.
        word = word_hash
def hashWord(word):
    """Return the Skein-1024 hex digest (256 hex chars) of *word*."""
    digest = skein1024(word.encode('UTF-8'), digest_bits=1024)
    return digest.hexdigest()
def hash_diff(hash1, hash2):
    """Return the Hamming distance (in bits) between two 256-hex-digit
    hashes, using the precomputed module-global nibble table."""
    return sum(diff_table[int(hash1[i], 16)][int(hash2[i], 16)]
               for i in range(256))
def sendWord(word):
    """Submit *word* to the xkcd alma-mater leaderboard via HTTP POST.

    Performs the request for its side effect only; nothing is returned.
    """
    payload = ('hashable=' + word).encode('UTF-8')
    conn = urllib.request.urlopen(
        'http://almamater.xkcd.com/?edu=uiuc.edu', payload)
    conn.close()
# Entry point: start the endless hash search when run as a script.
if __name__ == '__main__':
    main()
|
UTF-8
|
Python
| false | false | 2,013 |
4,707,284,184,136 |
c6e4012fe4e8ea21adf8dca9acd6fca02a09cf61
|
7ad9412c643822bd4290dba262a2a9fda0208941
|
/tools/lib/python2.6/dist-packages/gwibber/resources.py
|
862536f2660fd1e186bab5f0d06a86ad844f8f56
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
non_permissive
|
lucchouina/Rim
|
https://github.com/lucchouina/Rim
|
b62182067eeb1cbd4778f9bb0e59845fbae85d29
|
91d841b8d45127bd223430de154d013f2e26335e
|
refs/heads/master
| 2016-09-09T18:22:49.923843 | 2013-09-17T14:59:32 | 2013-09-17T14:59:32 | 12,705,227 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
/usr/share/pyshared/gwibber/resources.py
|
UTF-8
|
Python
| false | false | 2,013 |
128,849,025,102 |
af6a65cc0beb6430012f448e5e399bb10d0c1e40
|
a7b52efaef380014f0f4e53ecb46d903eb1e2a0d
|
/tests/testsCartas.py
|
459c21a2ee01e905a3d09dce994aaa5a252d7e41
|
[] |
no_license
|
masuar/pyMus
|
https://github.com/masuar/pyMus
|
a214af1333b60561b904b6834fc700e5ec0960d6
|
e5157b3483eeafce6818765cc6097756eee3edd8
|
refs/heads/master
| 2017-09-24T00:09:56.234413 | 2013-01-07T17:53:42 | 2013-01-07T17:53:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 *-*
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath(".."))
import cartas
class TestsCartas(unittest.TestCase):
    """Unit tests for the Spanish-deck Carta and Mazo classes.

    Improvement: equality checks use assertEqual instead of
    assertTrue(a == b); assertEqual reports the actual and expected
    values on failure, whereas assertTrue only reports
    "False is not true".
    """
    def test_createCarta_oros_OK(self):
        carta = cartas.Carta(5, "Oros")
        self.assertEqual(carta.numero, 5)
        self.assertEqual(carta.palo, "Oros")
        self.assertEqual(str(carta), "5 de Oros")
    def test_createCarta_copas_OK(self):
        carta = cartas.Carta(5, "Copas")
        self.assertEqual(carta.numero, 5)
        self.assertEqual(carta.palo, "Copas")
        self.assertEqual(str(carta), "5 de Copas")
    def test_createCarta_espadas_OK(self):
        carta = cartas.Carta(5, "Espadas")
        self.assertEqual(carta.numero, 5)
        self.assertEqual(carta.palo, "Espadas")
        self.assertEqual(str(carta), "5 de Espadas")
    def test_createCarta_bastos_OK(self):
        carta = cartas.Carta(5, "Bastos")
        self.assertEqual(carta.numero, 5)
        self.assertEqual(carta.palo, "Bastos")
        self.assertEqual(str(carta), "5 de Bastos")
    def test_createCarta_numero1_es_as_OK(self):
        # In a Spanish deck, number 1 is the As (ace).
        carta = cartas.Carta(1, "Oros")
        self.assertEqual(carta.nombre, "As")
        self.assertEqual(str(carta), "As de Oros")
    def test_createCarta_numero10_es_sota_OK(self):
        carta = cartas.Carta(10, "Oros")
        self.assertEqual(carta.nombre, "Sota")
        self.assertEqual(str(carta), "Sota de Oros")
    def test_createCarta_numero11_es_caballo_OK(self):
        carta = cartas.Carta(11, "Oros")
        self.assertEqual(carta.nombre, "Caballo")
        self.assertEqual(str(carta), "Caballo de Oros")
    def test_createCarta_numero12_es_rey_OK(self):
        carta = cartas.Carta(12, "Oros")
        self.assertEqual(carta.nombre, "Rey")
        self.assertEqual(str(carta), "Rey de Oros")
    def test_createCarta_numero5_es_5_OK(self):
        carta = cartas.Carta(5, "Oros")
        self.assertEqual(carta.nombre, "5")
    def test_createCarta_wrongpalo_RaiseException(self):
        self.assertRaises(Exception, cartas.Carta, 5, "Bastos2")
    def test_createCarta_nonepalo_RaiseException(self):
        self.assertRaises(Exception, cartas.Carta, 5, None)
    def test_createCarta_nonenumber_RaiseException(self):
        self.assertRaises(Exception, cartas.Carta, None, "Bastos")
    def test_createCarta_lessernumber_RaiseException(self):
        self.assertRaises(Exception, cartas.Carta, -2, "Bastos")
    def test_createCarta_largernumber_RaiseException(self):
        self.assertRaises(Exception, cartas.Carta, 15, "Bastos")
    def test_createCarta_eightnumber_RaiseException(self):
        # 8 and 9 do not exist in the 40-card Spanish deck.
        self.assertRaises(Exception, cartas.Carta, 8, "Bastos")
    def test_createCarta_ninenumber_RaiseException(self):
        self.assertRaises(Exception, cartas.Carta, 9, "Bastos")
    def test_createMazo_OK(self):
        mezcladorFake = cartas.MezcladorFake()
        mazo = cartas.Mazo(mezcladorFake)
        self.assertEqual(len(mazo.cartas), 40)
        self.__assertPalo(mazo, "Oros", 0)
        self.__assertPalo(mazo, "Copas", 10)
        self.__assertPalo(mazo, "Espadas", 20)
        self.__assertPalo(mazo, "Bastos", 30)
    def test_createMazo_noshuffler_RaisesException(self):
        self.assertRaises(Exception, cartas.Mazo, None)
    def test_mezclarMazo_OK(self):
        mezcladorFake = cartas.MezcladorFake()
        mazo = cartas.Mazo(mezcladorFake)
        mazo.mezcla()
        self.assertEqual(mezcladorFake.NumMezclas, 1)
        # The fake shuffler simply reverses the deck.
        self.__assertPalo_a_la_inversa(mazo, "Bastos", 0)
        self.__assertPalo_a_la_inversa(mazo, "Espadas", 10)
        self.__assertPalo_a_la_inversa(mazo, "Copas", 20)
        self.__assertPalo_a_la_inversa(mazo, "Oros", 30)
    def test_cortarMazo_OK(self):
        mezcladorFake = cartas.MezcladorFake()
        mazo = cartas.Mazo(mezcladorFake)
        mazo.corta()
        self.assertEqual(mezcladorFake.NumCortes, 1)
        self.assertEqual(len(mazo.cartas), 40)
        # The fake cut swaps the two halves of the deck.
        self.__assertPalo(mazo, "Espadas", 0)
        self.__assertPalo(mazo, "Bastos", 10)
        self.__assertPalo(mazo, "Oros", 20)
        self.__assertPalo(mazo, "Copas", 30)
    def test_sacarCartas_OK(self):
        mezcladorFake = cartas.MezcladorFake()
        mazo = cartas.Mazo(mezcladorFake)
        totalCartas = len(mazo.cartas)
        numCartasASacar = 4
        cartasSacadas = mazo.saca(numCartasASacar)
        self.assertEqual(len(mazo.cartas), totalCartas - numCartasASacar)
        self.assertEqual(len(cartasSacadas), numCartasASacar)
        # The first four cards of an unshuffled deck are 1-4 of Oros.
        for i in range(numCartasASacar):
            self.assertEqual(cartasSacadas[i].numero, i + 1)
            self.assertEqual(cartasSacadas[i].palo, "Oros")
    def __assertPalo(self, mazo, palo, starting):
        # A suit in order: 1-7, then Sota (10), Caballo (11), Rey (12).
        for i in range(0, 7):
            self.assertEqual(mazo.cartas[starting + i].numero, i + 1)
            self.assertEqual(mazo.cartas[starting + i].palo, palo)
        for offset, numero in ((7, 10), (8, 11), (9, 12)):
            self.assertEqual(mazo.cartas[starting + offset].numero, numero)
            self.assertEqual(mazo.cartas[starting + offset].palo, palo)
    def __assertPalo_a_la_inversa(self, mazo, palo, starting):
        # Reversed suit: 12, 11, 10, then 7 down to 2 (the original test
        # never checked the last slot, the As; behaviour preserved).
        for offset, numero in ((0, 12), (1, 11), (2, 10)):
            self.assertEqual(mazo.cartas[starting + offset].numero, numero)
            self.assertEqual(mazo.cartas[starting + offset].palo, palo)
        for i in range(3, 9):
            self.assertEqual(mazo.cartas[starting + i].numero, 10 - i)
            self.assertEqual(mazo.cartas[starting + i].palo, palo)
|
UTF-8
|
Python
| false | false | 2,013 |
429,496,751,082 |
81c80ea52c9b6f80217a730adb1d75206f402734
|
b03621a8bcef517cb9f6de9e168cd24e86a85232
|
/egads/egads/algorithms/thermodynamics/density_dry_air_cnrm.py
|
2a43e0ed6908261439e324a30779d2cad0ecf79e
|
[
"BSD-3-Clause"
] |
permissive
|
mfreer/eufar-egads
|
https://github.com/mfreer/eufar-egads
|
ed20944dc0ea5c4a3b75a8e3c9cf0e8d675d2cbe
|
05fce4d36f070587171506caa8b136508fa9405c
|
refs/heads/master
| 2021-01-10T18:46:28.109667 | 2013-09-02T22:00:28 | 2013-09-02T22:00:28 | 33,680,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = "mfreer"
__date__ = "$Date:: $"
__version__ = "$Revision:: $"
__all__ = ["DensityDryAirCnrm"]
import egads.core.egads_core as egads_core
import egads.core.metadata as egads_metadata
class DensityDryAirCnrm(egads_core.EgadsAlgorithm):
    """
    FILE        density_dry_air_cnrm.py
    VERSION     $Revision$
    CATEGORY    Thermodynamics
    PURPOSE     Calculates density of dry air
    DESCRIPTION Calculates density of dry air given static temperature and
                pressure. If virtual temperature is used instead of static, this
                algorithm calculates density of humid air.
    INPUT       P_s         vector  hPa     static pressure
                T_s         vector  K or C  static temperature
    OUTPUT      rho         vector  kg/m3   density
    SOURCE      CNRM/GMEI/TRAMM
    REFERENCES  Equation of state for a perfect gas, Triplet-Roche, page 34.
    """
    def __init__(self, return_Egads=True):
        egads_core.EgadsAlgorithm.__init__(self, return_Egads)
        # Metadata describing the output variable, in the form the EGADS
        # framework expects (keys must match exactly).
        self.output_metadata = egads_metadata.VariableMetadata({'units':'kg/m^3',
                                                               'long_name':'density',
                                                               'standard_name':'air_density',
                                                               'Category':['Thermodynamic', 'Atmos State']})
        # Algorithm-level metadata: inputs, expected units, outputs and
        # processing provenance.
        self.metadata = egads_metadata.AlgorithmMetadata({'Inputs':['P_s', 'T_s'],
                                                          'InputUnits':['hPa', 'K'],
                                                          'Outputs':['rho'],
                                                          'Processor':self.name,
                                                          'ProcessorDate':__date__,
                                                          'ProcessorVersion':__version__,
                                                          'DateProcessed':self.now()},
                                                          self.output_metadata)
    def run(self, P_s, T_s):
        # Delegate to the framework, which handles unit conversion and
        # wrapping of the result.
        return egads_core.EgadsAlgorithm.run(self, P_s, T_s)
    def _algorithm(self, P_s, T_s):
        # Specific gas constant for dry air.
        R_a = 287.05 #J/kg/K
        # Ideal gas law rho = P / (R T); the factor 100 converts the
        # input pressure from hPa to Pa.
        rho = (P_s * 100) / (R_a * T_s)
        return rho
|
UTF-8
|
Python
| false | false | 2,013 |
1,159,641,217,821 |
1fd3268e8ad545ed5e2ff864d74c16758da3ff0c
|
5e02a8a9866d0dbbce93114c5cba5a064d524786
|
/ivle/webapp/filesystem/diff/__init__.py
|
10cdf55cfad4bfd58cbd9e043508bcc648b451d0
|
[
"GPL-2.0-only"
] |
non_permissive
|
dcoles/ivle
|
https://github.com/dcoles/ivle
|
65c665edb5b06995cf3c5c014cd6a3251639a276
|
c2482f4af414434e681b91368825a26b0d18751b
|
refs/heads/master
| 2021-01-15T20:29:21.894660 | 2011-08-24T08:24:36 | 2011-08-24T08:24:36 | 2,286,095 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# IVLE - Informatics Virtual Learning Environment
# Copyright (C) 2007-2009 The University of Melbourne
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Author: David Coles, Will Grant
'''Components of the webapp for diffing user files.'''
import os
import re
import cgi
try:
import json
except ImportError:
import simplejson as json
import genshi
import ivle.interpret
from ivle.webapp.base.xhtml import XHTMLView
from ivle.webapp.base.plugins import ViewPlugin, MediaPlugin
from ivle.webapp.errors import NotFound, BadRequest
from ivle.webapp.filesystem import make_path_breadcrumbs
from ivle.webapp import ApplicationRoot
class DiffView(XHTMLView):
    '''A view to present a nice XHTML Subversion diff from a user's jail.'''
    template = 'template.html'
    tab = 'files'
    breadcrumb_text = 'Files'
    subpath_allowed = True
    def authorize(self, req):
        # Any logged-in user may view diffs of files in their own jail.
        return req.user is not None
    def populate(self, req, ctx):
        self.plugin_styles[Plugin] = ['diff.css']
        # Up to two "r" query parameters select the revisions to compare.
        revfields = req.get_fieldstorage().getlist("r")
        if len(revfields) > 2:
            raise BadRequest('A maximum of two revisions can be given.')
        revs = [revfield.value for revfield in revfields]
        jail_dir = os.path.join(req.config['paths']['jails']['mounts'],
                                req.user.login)
        # Run the diffservice inside the user's jail; it emits a JSON
        # document on stdout describing the diff (or an error).
        (out, err) = ivle.interpret.execute_raw(req.config, req.user, jail_dir,
                     '/home', os.path.join(req.config['paths']['share'],
                                        'services/diffservice'),
                     [self.path] + revs
                     )
        assert not err
        response = json.loads(out)
        if 'error' in response:
            if response['error'] == 'notfound':
                raise NotFound()
            else:
                raise AssertionError('Unknown error from diffservice: %s' %
                                     response['error'])
        # No error. We must be safe.
        diff = response['diff']
        # Split up the udiff into individual files
        diff_matcher = re.compile(
            r'^Index: (.*)\n\=+\n((?:[^I].*\n)*)',re.MULTILINE
        )
        ctx['title'] = os.path.normpath(self.path).rsplit('/', 1)[-1]
        self.extra_breadcrumbs = make_path_breadcrumbs(req, self.subpath)
        self.extra_breadcrumbs.append(SubversionDiffBreadcrumb())
        # Create a dict with (name, HTMLdiff) pairs for each non-empty diff.
        ctx['files'] = dict([(fd[0], genshi.XML(htmlfy_diff(fd[1])))
                             for fd in diff_matcher.findall(diff)
                             if fd[1]])
    @property
    def path(self):
        # Filesystem path inside the jail, reconstructed from the URL
        # subpath; empty string at the jail root.
        return os.path.join(*self.subpath) if self.subpath else ''
class SubversionDiffBreadcrumb(object):
    """Static breadcrumb entry appended for the Subversion diff view."""
    text = 'Subversion Diff'
def htmlfy_diff(difftext):
    """Adds HTML markup to a udiff string.

    Fixed: the substitutions are now applied in a fixed order.  The
    original iterated a dict, whose order is undefined on the Python
    versions this file targets; order matters because a file-header
    line ("+++ f (rev)") also matches the generic add pattern
    ``^\\+(.*)$``, so the markup could come out differently run to run.
    The HTML escaping (&, <, >) is done inline because cgi.escape is
    deprecated and removed in modern Python; the replacements are the
    exact three cgi.escape performed.
    """
    output = (difftext.replace('&', '&amp;')
                      .replace('<', '&lt;')
                      .replace('>', '&gt;'))
    # (pattern, replacement) pairs, applied in this order: file headers
    # first, then hunk ranges, then add/remove/special lines.
    subs = [
        (r'^([\+\-]{3})\s(\S+)\s\((.+)\)$',
         r'<span class="diff-files">\1 \2 <em>(\3)</em></span>'),
        (r'^\@\@ (.*) \@\@$',
         r'<span class="diff-range">@@ \1 @@</span>'),
        (r'^\+(.*)$',
         r'<span class="diff-add">+\1</span>'),
        (r'^\-(.*)$',
         r'<span class="diff-sub">-\1</span>'),
        (r'^\\(.*)$',
         r'<span class="diff-special">\\\1</span>'),
    ]
    for pattern, repl in subs:
        output = re.compile(pattern, re.MULTILINE).sub(repl, output)
    return '<pre class="diff">%s</pre>' % output
class Plugin(ViewPlugin, MediaPlugin):
    """Registers the diff view at /diff and serves its media directory."""
    views = [(ApplicationRoot, 'diff', DiffView)]
    media = 'media'
|
UTF-8
|
Python
| false | false | 2,011 |
18,253,611,015,620 |
eecc4021c65a4520ea86717bde6ff5f235ad3094
|
c88b75461783f963dc25d176ebc4ef1a60401a3c
|
/extra/models.py
|
0680e7018b7839bf9d466b3adb4390f27aa7896a
|
[
"GPL-3.0-only"
] |
non_permissive
|
gctucker/mrwf
|
https://github.com/gctucker/mrwf
|
7622ef27040414b12eb02678509e5d5d973b5400
|
7bf2a0e3357b9a382f0931e46c56adbaa13f2fbe
|
refs/heads/master
| 2020-12-24T15:13:35.435812 | 2013-10-27T21:29:43 | 2013-10-27T21:29:43 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# MRWF - extra/models.py
#
# Copyright (C) 2009, 2010, 2011. 2012, 2013
# Guillaume Tucker <guillaume@mangoz.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.contrib.auth.models import User
from django.db.models import (CharField, TextField, DateField, BooleanField,
PositiveIntegerField, PositiveSmallIntegerField,
ForeignKey, OneToOneField, ManyToManyField,
ImageField)
from cams.models import (Record, Contact, Event, Fair, Person,
Application, EventApplication, Invoice)
from cams.libcams import get_obj_address
from mrwf.settings import IMG_MAX_D, IMG_MAX_d
from mrwf.extra import imaging
# WORKAROUND for temporary fix with the event contacts
from django.db.models import EmailField, URLField, IntegerField
from cams.models import Contact
class FairEventType(models.Model):
    """A fair event listing type, with optional notification addresses."""
    name = CharField(max_length=63)
    # Short (up to 3 characters) tag identifying the listing.
    tag = CharField(max_length=3)
    notif_email = TextField(blank=True, help_text=
                            "e-mail addresses separated with whitespaces")
    def __unicode__(self):
        return self.name
    class Meta(object):
        verbose_name = 'Fair event listing'
        ordering = ['name']
class FairEventCategory(models.Model):
    """A single keyword used to categorise fair events."""
    word = CharField(max_length=31)
    def __unicode__(self):
        return self.word
    class Meta(object):
        verbose_name_plural = 'Fair event categories'
class FairEvent(Event):
    """An event in the fair programme, extending the base Event with a
    listing type, categories, an image and per-event contact overrides."""
    event = OneToOneField(Event, parent_link=True)
    etype = ForeignKey(FairEventType, blank=True, null=True,
                       verbose_name="Listing")
    categories = ManyToManyField(FairEventCategory, null=True, blank=True)
    # Bug fix to clear the image (file) field in admin:
    # http://code.djangoproject.com/ticket/7048
    # http://code.djangoproject.com/ticket/4979
    image = ImageField(upload_to='img', blank=True, null=True)
    age_min = PositiveIntegerField(blank=True, null=True)
    age_max = PositiveIntegerField(blank=True, null=True)
    # WORKAROUND
    # Temporary fix to get a practical solution for the event contact details.
    # This can be used to override the organisation contact details.
    line_1 = CharField(max_length=63, blank=True)
    line_2 = CharField(max_length=63, blank=True)
    line_3 = CharField(max_length=63, blank=True)
    town = CharField(max_length=63, blank=True)
    postcode = CharField(max_length=15, blank=True)
    country = CharField(max_length=63, blank=True)
    email = EmailField(blank=True, max_length=127, help_text =
                       Contact.email_help_text, verbose_name="E-mail")
    website = URLField(max_length=255, blank=True,
                       help_text=Contact.website_help_text)
    telephone = CharField(max_length=127, blank=True)
    mobile = CharField(max_length=127, blank=True)
    fax = CharField(max_length=31, blank=True)
    addr_order = IntegerField("Order", blank=True, default=0, help_text=
                              "Order of the premises on Mill Road.")
    addr_suborder = IntegerField("Sub-order", blank=True, default=0,
                                 help_text=
                               "Order of the premises on side streets around Mill Road.")
    ignore_org_c = BooleanField(default=False, verbose_name =
                                "Ignore organisation contacts")
    def save(self, *args, **kwargs):
        """Default the fair (current) and date (fair date), scale down
        the image, then delegate to the parent save().

        Fixed: the parent save() must receive *args/**kwargs unpacked;
        the old call ``save(args, kwargs)`` passed the tuple and the
        dict positionally (as force_insert/force_update).
        """
        if not self.fair:
            self.fair = Fair.objects.get(current=True)
        if not self.date:
            self.date = self.fair.date
        if self.image:
            imaging.scale_down(self.image, IMG_MAX_D, IMG_MAX_d)
        super(FairEvent, self).save(*args, **kwargs)
    # WORKAROUND to make the event contacts more flexible
    def get_composite_contact(self):
        """Return an object merging this event's contact fields with the
        organisation's first contact (unless ignore_org_c is set)."""
        class CompositeContact(object):
            def __init__(self, event):
                attrs = ['line_1', 'line_2', 'line_3', 'postcode', 'town',
                         'country', 'email', 'website', 'telephone', 'mobile',
                         'fax', 'addr_order', 'addr_suborder']
                if event.ignore_org_c:
                    # Event-only contact details.
                    for att in attrs:
                        setattr(self, att, getattr(event, att, ''))
                else:
                    # Fall back to the organisation contact for any
                    # field the event leaves empty.
                    if event.org and event.org.contact_set.count() > 0:
                        org_c = event.org.contact_set.all()[0]
                    else:
                        org_c = None
                    for att in attrs:
                        value = getattr(event, att, '')
                        if org_c and not value:
                            value = getattr(org_c, att, '')
                        setattr(self, att, value)
            def get_address(self, *args):
                return get_obj_address(self, *args)
        return CompositeContact(self)
    @classmethod
    def get_for_fair(cls, event_id, fair):
        """Return the FairEvent for event_id within the given fair, or
        None when the base event does not exist."""
        base_event = super(FairEvent, cls).get_for_fair(event_id, fair)
        if base_event:
            return cls.objects.get(pk=base_event.pk)
        return None
class StallEvent(FairEvent):
    """A stall at the fair (market/craft or food fair), with booking
    options, tombola details and invoicing links."""
    # Choices for the preferred public contact detail.
    TELEPHONE = 0
    EMAIL = 1
    WEBSITE = 2
    xcontact = ((TELEPHONE, 'telephone'),
                (EMAIL, 'email'),
                (WEBSITE, 'website'))
    # Food fair plot sizes.
    PLOT_A = 0
    PLOT_B = 1
    PLOT_C = 2
    xplot = ((PLOT_A, 'Plot A (3x3)'),
             (PLOT_B, 'Plot B (3x4)'),
             (PLOT_C, 'Plot C (3x5)'))
    # Market & craft stall placement options.
    MC_STALL_OPT_INSIDE_1 = 0
    MC_STALL_OPT_INSIDE_2 = 1
    MC_STALL_OPT_OUTSIDE = 2
    xmcstall = ((MC_STALL_OPT_INSIDE_1, 'Inside the marquee, 1 table'),
                (MC_STALL_OPT_INSIDE_2, 'Inside the marquee, 2 tables'),
                (MC_STALL_OPT_OUTSIDE, 'Outside'))
    # ToDo: make this dynamic (in the FairEventType table?) with prices to
    # avoid changing the model each year...
    # For Market & Craft Stalls
    n_spaces = PositiveSmallIntegerField(
        default=0, verbose_name="Number of spaces", blank=True, null=True)
    n_tables = PositiveSmallIntegerField(
        default=0, verbose_name="Number of tables", blank=True, null=True)
    mc_stall_option = PositiveSmallIntegerField(
        choices=xmcstall, blank=True, null=True, verbose_name="Stall options")
    # For Food Fair
    plot_type = PositiveSmallIntegerField(
        choices=xplot, blank=True, null=True, verbose_name="Plot type")
    infrastructure = TextField(
        blank=True, null=True, verbose_name="Infrastructure description")
    tombola_gift = BooleanField(
        default=False, blank=True, verbose_name="Tombola gift")
    tombola_description = TextField(
        blank=True, null=True, verbose_name="Tombola gift description")
    invoice_person = ForeignKey(Person, blank=True, null=True)
    invoice_contact = ForeignKey(Contact, blank=True, null=True)
    main_contact = PositiveSmallIntegerField(
        choices=xcontact, blank=True, null=True)
    extra_web_contact = TextField(blank=True)
    comments = TextField(blank=True)
    media_usage = BooleanField(
        default=False, blank=True, verbose_name="Media usage authorisation")
    def get_main_contact_value(self):
        """Return the owner's chosen contact detail as a string, or
        '[not provided]' when missing.

        Fixed: TELEPHONE is 0, so the old ``if self.main_contact:``
        treated a telephone selection as "no selection"; compare
        against None instead.
        """
        value = None
        if self.main_contact is not None:
            p = self.event.owner.person
            c = Contact.objects.filter(obj=p)
            if c.count() > 0:
                if self.main_contact == StallEvent.TELEPHONE:
                    value = c[0].telephone
                elif self.main_contact == StallEvent.EMAIL:
                    value = c[0].email
                elif self.main_contact == StallEvent.WEBSITE:
                    value = c[0].website
        if not value:
            value = '[not provided]'
        return value
    @property
    def plot_type_str(self):
        """Human-readable plot type label ('' when unset)."""
        if self.plot_type is None:
            return ''
        return StallEvent.xplot[self.plot_type][1]
    @property
    def mc_stall_option_str(self):
        """Human-readable stall option label ('' when unset)."""
        if self.mc_stall_option is None:
            return ''
        return StallEvent.xmcstall[self.mc_stall_option][1]
class FairEventApplication(EventApplication):
    """Application to take part in the fair, tagged with a subtype."""
    STALLHOLDER = 0
    ADVERTISER = 1
    SPONSOR = 2
    OTHER = 3
    xtypes = ((STALLHOLDER, 'stallholder'), (ADVERTISER, 'advertiser'),
              (SPONSOR, 'sponsor'), (OTHER, 'other'))
    subtype = PositiveSmallIntegerField(choices=xtypes)
    org_name = CharField \
        (max_length=128, blank=True, verbose_name="Organisation name")
    @property
    def type_str(self):
        """Human-readable subtype label."""
        return FairEventApplication.xtypes[self.subtype][1]
    def save(self, *args, **kwargs):
        """Keep the linked event's status in sync with the application
        status, then delegate to the parent save().

        Fixed: the parent save() must receive *args/**kwargs unpacked;
        the old call ``save(args, kwargs)`` passed the tuple and the
        dict positionally.
        """
        if (self.status == Application.REJECTED):
            self.event.status = Record.DISABLED
            self.event.save()
        elif (self.status == Application.PENDING):
            self.event.status = Record.NEW
            self.event.save()
        super(FairEventApplication, self).save(*args, **kwargs)
class Listener(models.Model):
    """A user subscribed to be notified when a given trigger fires."""
    STALL_APPLICATION_RECEIVED = 0
    STALL_APPLICATION_ACCEPTED = 1
    STALL_APPLICATION_REJECTED = 2
    xtrigger = ((STALL_APPLICATION_RECEIVED, 'stall application received'),
                (STALL_APPLICATION_ACCEPTED, 'stall application accepted'),
                (STALL_APPLICATION_REJECTED, 'stall application rejected'))
    trigger = PositiveSmallIntegerField(choices=xtrigger)
    user = ForeignKey(User)
    def __unicode__(self):
        # e.g. "alice upon stall application received"
        return '{0} upon {1}'.format \
            (self.user, Listener.xtrigger[self.trigger][1])
class StallInvoice(Invoice):
    """An invoice tied one-to-one to a stall event."""
    stall = ForeignKey(StallEvent, unique=True)
    def __unicode__(self):
        return self.stall.__unicode__()
    class Meta(Invoice.Meta):
        ordering = ['stall__name']
|
UTF-8
|
Python
| false | false | 2,013 |
10,763,188,052,005 |
d3c7e1fec77009cf848bc503e2e7dafbeb898972
|
3b94bc467617a0272a80917abee39f37acd81a8b
|
/rseqflow2/ExpressionEstimation/SamSplitEvenly_and_Randomly_gencode_modify.py
|
eafb2a0721eced8bf0156492fe2d40a4b80024f5
|
[] |
no_license
|
falconer502/RseqFlow
|
https://github.com/falconer502/RseqFlow
|
b5f47ca2371b43798bf61be1de1d000010a0fde5
|
fcb94a6f4ae2e01b9759292a8f85d25503d7f99f
|
refs/heads/master
| 2021-01-15T10:24:57.343473 | 2014-09-23T21:36:51 | 2014-09-23T21:36:51 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#JSH 2013-10-31
#if a read mapped to multiple genes, only keep the reads mapped to single gene
# Bowtie2 default is to keep only the best valid alignment
# It uses the SAM optional field XS: to report the alignment score for the
# 2nd best alignment for a read.
# If "XS:i:" field is present, assume the read was multimapped, otherwise assume it's unique
#ORIG CODE:
#if a read mapped to multiple genes, only keep the reads mapped to single gene
#and then split the multi-mapped reads into the several genes evenly
#or assign the multi-mapped reads to one of the genes randomly
import os
import sys
import random
import optparse
import re
#fname_sam='c:/Python26/sample.txt'
#fname_table='c:/Python26/isoform_gene_table_ref19.txt'
#output='MappingSingleGene.txt'
#output2='SplitbyGenelength.txt'
# Parse command-line options: -s input SAM, -u unique-read output,
# -m multi-mapped output (-g is retained for the original algorithm).
prog_base = os.path.split(sys.argv[0])[1]
parser = optparse.OptionParser()
parser.add_option("-s", "--sam-mappingResult", action = "store", type = "string", dest = "mappingResult_sam_file",
                  help = "mapping result in sam format")
#JSH 2013-10-31 Leaving this in for now but it only applies to orig code
parser.add_option("-g", "--gtf-annotation", action = "store", type = "string", dest = "anotation_gtf_file",
                  help = "annotation in gtf format")
parser.add_option("-u", "--output-unique", action = "store", type = "string", dest = "uniqueMap_sam_output",
                  help = "uniquely mapped reads result in sam format")
parser.add_option("-m", "--output-multiple", action = "store", type = "string", dest = "multiMap_txt_output",
                  help = "multiple mapped reads result in txt file")
(options, args) = parser.parse_args()
# All three remaining options are mandatory; bail out with usage otherwise.
if (options.mappingResult_sam_file is None or
# JSH 2013-10-31 Commented out, only required for orig code
#    options.anotation_gtf_file is None or
    options.uniqueMap_sam_output is None or
    options.multiMap_txt_output is None):
    print prog_base + ": error: missing required command-line argument."
    parser.print_help()
    sys.exit(1)
fname_sam=options.mappingResult_sam_file # the original mapping file
#JSH 2013-10-31 Commented out, part of orig code
#fname_gtf=options.anotation_gtf_file # isoform_gene_gtf
fname_uniq=options.uniqueMap_sam_output # file for reads that map to single gene
fname_multi=options.multiMap_txt_output # file for reads that map to multiple genes
#JSH 2013-10-31 Commented out, part of orig code
#bk_gtf=open(fname_gtf)
#sh_gtf=bk_gtf.readlines()
#bk_gtf.close()
#JSH 2013-10-31 Commented out, part of orig code
##JSH 2013-08-26
#### create empty file if chr not present (required for PEGASUS workflow) ###
#if(sh_gtf==0):
# print "SamSplitEvenly_and_Randomly_gencode_modify.py: No chr data for " + fname_gtf
# try:
# open(fname_uniq,'w').close()
# except:
# print prog_base + ": error: cannot create file " + fname_uniq
# sys.exit(1)
# try:
# open(fname_multi,'w').close()
# except:
# print prog_base + ": error: cannot create file " + fname_multi
# sys.exit(1)
#
# sys.exit(0)
#
#chromosome=None
#isoform_gene={}
#gene_readsplit={}
#gene_readrandom={}
###########generate dictionary: key-isoform, value-gene########
#pre_transcript_id ='none'
#samline=0
#for v in range(0,len(sh_gtf)):
# temp=sh_gtf[v][0:-1].split('\t')
# temp1 =temp[8]
# attributes =temp1.split(';')
# temp2 =attributes[0].split('"')
# gene_id =temp2[1]
# temp3 =attributes[1].split('"')
# transcript_id =temp3[1]
# #if gene_id==transcript_id:
# # print "Gene ID:"+gene_id+'\n'
# # print "Transcrit ID:"+transcript_id+'\n'
# # print "Gene ID and Transcript ID are the same, so please check the annot."
# #sys.exit(1)
# if transcript_id!=pre_transcript_id:
# isoform_gene[transcript_id]=gene_id
# pre_gene_id=gene_id
# pre_transcript_id=transcript_id
bk_sam=open(fname_sam)
#sh_sam=bk_sam.readlines()
#bk_sam.close()
#JSH 2013-10-31 Commented out, part of orig code
#read_alignments={}
#read_genes={}
#samline=0
#JSH 2013-10-31 Added the following 3 lines:
uniq_output=open(fname_uniq, 'w')
multi_output=open(fname_multi, 'w')
secondary=256 #secondary read tag from bowtie2 sam file (SAM FLAG 0x100)
# Stream the SAM file line by line, routing each alignment either to
# the unique-read output or the multi-mapped output.
for v in bk_sam.xreadlines():
#    samline+=1
    if v[0]=='@':
        # sam header
        continue
    temp1=v[0:-1].split('\t')
#    readID=temp1[0]
    # Check that reference field is not "*" (unmapped read)
    if re.match('^\*',temp1[2]):
        continue
    #JSH 2013-10-31 Added:
    # If field "XS:i:" is present, read is multimapped
    # (bowtie2 reports the second-best alignment score there)
    if re.search(r'XS:i:',v):
        multi_output.writelines(v)
    else:
        fields=v[0:-1].split('\t')
        flag=int(fields[1])
        # Secondary alignments (FLAG bit 0x100) are also multi-mapped.
        if (flag&secondary):
            multi_output.writelines(v)
        else:
            uniq_output.writelines(v)
bk_sam.close()
uniq_output.close()
multi_output.close()
print "Done splitting %s for unique and multiple mapped reads" % fname_sam
# JSH 2013-10-31 THIS IS THE NEW END OF FILE
#JSH 2013-10-31 Commented out the rest of the code, part of orig code
# temp2=temp1[2].split('_',2)
# #if len(temp2)!=3:
# # print "File: %s , line %d" % (fname_sam, samline)
# # print "Error:some information in transcripts title is missing."
# # print "please check the transcripts name in reference sequence(fa file or sam file)."
# # print "The correct format should be '$genomeName_$AnnotationSource_$TranscriptsID=$Chromosome:$Start-$End'."
# # sys.exit(1)
# #if (temp2[0]=='') or (temp2[1]==''):
# # print "File: %s , line %d" % (fname_sam, samline)
# # print "Error:genomeName or AnnotationSource is missing."
# # print "please check the transcripts name in reference sequence(fa file or sam file). In line %d" % (samline)
# # print "The correct format should be '$genomeName_$AnnotationSource_$TranscriptsID=$Chromosome:$Start-$End'."
# # sys.exit(1)
# temp3=temp2[2].split('=')
# #if len(temp3)!=2:
# # print "File: %s , line %d" % (fname_sam, samline)
# # print "Error:transcriptID or chromosome information is missing."
# # print "please check the transcripts name in reference sequence(fa file or sam file). In line %d" % (samline)
# # print "The correct format should be '$genomeName_$AnnotationSource_$TranscriptsID=$Chromosome:$Start-$End'."
# # sys.exit(1)
# #if temp3[0]=='':
# # print "File: %s , line %d" % (fname_sam, samline)
# # print "Error:transcriptID information is missing."
# # print "please check the transcripts name in reference sequence(fa file or sam file). In line %d" % (samline)
# # print "The correct format should be '$genomeName_$AnnotationSource_$TranscriptsID=$Chromosome:$Start-$End'."
# # sys.exit(1)
# #if temp3[1]=='':
# # print "File: %s , line %d" % (fname_sam, samline)
# # print "Error:chromosome information is missing."
# # print "please check the transcripts name in reference sequence(fa file or sam file). In line %d" % (samline)
# # print "The correct format should be '$genomeName_$AnnotationSource_$TranscriptsID=$Chromosome:$Start-$End'."
# # sys.exit(1)
# isoform=temp3[0]
# temp4=temp3[1].split(':')
# chromosome=temp4[0]
# #JSH -- commented read_length since it is now handled in Get_ReadsMappingInformation.py
# #temp6=temp1[5].split('M')
# #read_length=int(temp6[0])
# try:
# gene=isoform_gene[isoform]
# except:
# #print "Warning:transcript ID in annotation and transcript ID in sam file are not consistent. File:%s, in line %d" % (fname_sam, samline)
# #print " If all transcripts in two files are not consistent, the result will be null."
# continue
# try:
# read_alignments[readID].append(v)
# read_genes[readID].append(gene)
# except:
# read_alignments[readID]=[]
# read_genes[readID]=[]
# read_alignments[readID].append(v)
# read_genes[readID].append(gene)
#bk_sam.close()
#############get read length and chromosome##########
##temp=sh_sam[-1].split('\t')
##temp1=temp[5].split('M')
##read_length=int(temp1[0])
##temp2=temp[2].split('=')
##temp3=temp2[1].split(':')
##chromosome=temp3[0]
#
#file_output=open(fname_uniq, 'w')
#file_output2=open(fname_multi, 'w')
#
#read_count=0
#for readID in read_alignments:
# read_count+=1
# gene=[]
# same_read_mapping=[]
# gene=read_genes[readID]
# same_read_mapping=read_alignments[readID]
# genelist=list(set(gene))
# if len(genelist)>1:
# gene_transcriptPosition={}
# for v in range(0, len(same_read_mapping)):
# temp=same_read_mapping[v].split('\t')
# temp1=temp[2].split('_',2)
# temp2=temp1[2].split('=')
# transcriptID=temp2[0]
# mapPosition=temp[3]
# transcript_position=str(transcriptID)+':'+str(mapPosition)
# geneID=isoform_gene[transcriptID]
# try:
# gene_transcriptPositions[geneID].append(transcript_position)
# except:
# gene_transcriptPosition[geneID]=[]
# gene_transcriptPosition[geneID].append(transcript_position)
# writeline=str(readID)+'\t'+chromosome+'\t'
# for geneID in gene_transcriptPosition:
# transcriptPosition_list=gene_transcriptPosition[geneID]
# length=len(transcriptPosition_list)
# randomly=random.randint(0,length-1) #create the random number to assign the read randomly
# writeline += str(transcriptPosition_list[randomly])
# writeline += ','
# file_output2.writelines(writeline+'\n')
#
# else:
# length=len(same_read_mapping)
# randomly=random.randint(0,length-1)
# file_output.writelines(str(same_read_mapping[randomly]))
###########################################
#
#file_output.close()
#
#print "Uniquely mapped result and multiple mapped result Done for Current Chromosome"
#if chromosome is not None:
# print "Uniquely mapped result and multiple mapped result Done for Chromosome: %s" % (chromosome)
|
UTF-8
|
Python
| false | false | 2,014 |
6,992,206,772,845 |
c270c5a46309e11f9834ba47713ea8f9b9625da6
|
f1d12e236031febe9d86c2196d54a9c4b2b7613d
|
/ensconce/dao/resources.py
|
742936899ab97a0d0037601af68f2807c848848b
|
[
"BSD-3-Clause"
] |
permissive
|
netwrkr/ensconce
|
https://github.com/netwrkr/ensconce
|
59529ff66c1a69a7e43f539fef17f84aa05c26bf
|
eda938c67eb0af8fb7d3ccf668e07d2f76485aa5
|
refs/heads/master
| 2021-01-18T19:59:43.742730 | 2014-06-19T19:15:17 | 2014-06-19T19:15:17 | 26,174,853 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
This module provides the database routines to manipulate resources.
"""
from __future__ import absolute_import
from sqlalchemy import or_, and_, func
from sqlalchemy.orm.exc import NoResultFound
#from ensconce.dao import groups
from ensconce.autolog import log
from ensconce.dao import SearchResults
from ensconce.model import meta
from ensconce import model, exc
def get(resource_id, assert_exists=True):
    """
    Return the resource object for the specified id.

    :param resource_id: The ID of the resource to look up.
    :param assert_exists: Whether to raise :class:`exc.NoSuchEntity` if the
        entity does not exist (avoids a None-dereference later in the caller).
    :raise ensconce.exc.NoSuchEntity: If the resource does not exist and
        ``assert_exists`` is True.
    :rtype: :class:`model.Resource`
    """
    session = meta.Session()
    try:
        resource = session.query(model.Resource).get(resource_id)
    except Exception:
        log.exception("Error retrieving resource")
        raise
    if assert_exists and not resource:
        raise exc.NoSuchEntity(model.Resource, resource_id)
    return resource
def get_by_name(name, assert_single=True, assert_exists=True):
    """
    Get the/a matching resource by name.

    If ``assert_single`` is True, exactly one resource must match and an
    exception is raised when multiple rows match.  Otherwise the first match
    is returned (ordered consistently by ID), or None if no match is found
    and ``assert_exists`` is False.

    :param name: The name of the resource.
    :param assert_single: Whether to ensure that there is only one match in the DB.
    :param assert_exists: Whether to ensure that there is (at least) one match in the DB.
    :raise ensconce.exc.NoSuchEntity: If no match and ``assert_exists`` is True.
    :return: The matching resource, or None if no resource matches.
    """
    session = meta.Session()
    match = None
    try:
        r_t = model.resources_table
        q = session.query(model.Resource).filter_by(name=name)
        q = q.order_by(r_t.c.id)
        if assert_single:
            match = q.one()
        else:
            match = q.first()
    except NoResultFound:
        # Fall through to the assert_exists check below.
        pass
    except Exception:
        log.exception("Error looking up resource by name: {0}".format(name))
        raise
    if assert_exists and not match:
        raise exc.NoSuchEntity(model.Resource, name)
    return match
def search(searchstr=None, order_by=None, offset=None, limit=None): # @ReservedAssignment
    """
    Search within resources and return matched results for specified limit/offset.

    :param searchstr: A search string that will be matched (case-insensitively)
        against the name, addr, and description attributes.
    :type searchstr: str
    :param order_by: The sort column; can be expressed as a string that includes
        asc/desc (e.g. "name asc").  Defaults to the name column.
    :type order_by: str
    :param offset: Offset in list for rows to return (supporting pagination).
    :type offset: int
    :param limit: Max rows to return (supporting pagination).
    :type limit: int
    :returns: A :class:`ensconce.dao.SearchResults` named tuple that includes the
        total match count and the list of :class:`ensconce.model.Resource` matches
        for this page.
    :rtype: :class:`ensconce.dao.SearchResults`
    """
    session = meta.Session()
    try:
        r_t = model.resources_table
        if order_by is None:
            order_by = r_t.c.name
        clauses = []
        if searchstr:
            clauses.append(or_(r_t.c.name.ilike('%'+searchstr+'%'),
                               r_t.c.addr.ilike('%'+searchstr+'%'),
                               r_t.c.description.ilike('%'+searchstr+'%')))
        # (Well, there's only a single clause right now, so that's a little over-engineered)
        count = session.query(func.count(r_t.c.id)).filter(and_(*clauses)).scalar()
        q = session.query(model.Resource).filter(and_(*clauses)).order_by(order_by)
        if limit is not None:
            q = q.limit(limit)
        if offset is not None:
            q = q.offset(offset)
        return SearchResults(count=count, entries=q.all())
    except Exception:
        log.exception("Error listing resources")
        raise
def list(): # @ReservedAssignment
    """
    Return a list of all resources, ordered by name.

    (Unlike :func:`search`, this function takes no pagination parameters.)

    :returns: A list of :class:`ensconce.model.Resource` results.
    :rtype: list
    """
    session = meta.Session()
    try:
        r_t = model.resources_table
        q = session.query(model.Resource)
        q = q.order_by(r_t.c.name)
        resources = q.all()
    except Exception:
        log.exception("Error listing resources")
        raise
    else:
        return resources
def create(name, group_ids, addr=None, description=None, notes=None, tags=None):
    """
    Create a new resource record (and its group memberships) in the database.

    :param name: The resource name (non-unique names are allowed).
    :param group_ids: One or more group IDs the resource belongs to; a single
        int/str value is accepted and wrapped in a list.
    :param addr: Optional resource address.
    :param description: Optional plaintext description.
    :param notes: Optional notes; stored encrypted via ``notes_decrypted``.
    :param tags: Optional tags value.
    :raise ValueError: If no group ids are specified.
    :return: The newly created :class:`model.Resource`.
    """
    if group_ids is None:
        group_ids = []
    elif isinstance(group_ids, (int, basestring)):
        group_ids = [int(group_ids)]
    if not group_ids:
        raise ValueError("No group ids specified for new resource.")
    session = meta.Session()
    try:
        resource = model.Resource()
        resource.name = name  # BTW, non-unique resource names are allowed.
        resource.addr = addr
        resource.description = description
        resource.notes_decrypted = notes
        resource.tags = tags
        session.add(resource)
        session.flush()
    except Exception:
        log.exception("Error creating resource.")
        raise
    for group_id in group_ids:
        try:
            group_lookup = model.GroupResource()
            group_lookup.resource_id = resource.id
            group_lookup.group_id = int(group_id)
            session.add(group_lookup)
            session.flush() # Fail fast on a bad group id.
        except Exception:
            log.exception("Error adding group to resource: {0}, resource={1}".format(group_id, name))
            raise
    session.flush()
    return resource
def modify(resource_id, group_ids=None, **kwargs):
    """
    Modify a resource entry in the database, only updating specified attributes.

    :param resource_id: The ID of resource to modify.
    :keyword group_ids: The group IDs that this resource should belong to;
        a single int/str value is accepted and wrapped in a list.  Passing an
        empty list is an error (a resource must keep at least one group).
    :keyword name: The resource name.
    :keyword addr: The resource address.
    :keyword notes: An (encrypted) notes field.
    :keyword tags: The tags field.
    :keyword description: A description field (not encrypted).
    :raise ValueError: If ``group_ids`` is an empty list.
    :return: A ``(resource, modified)`` tuple, where ``modified`` is the list
        of attribute names that actually changed.
    """
    if isinstance(group_ids, (basestring,int)):
        group_ids = [int(group_ids)]
    if group_ids is not None and len(group_ids) == 0:
        raise ValueError("Cannot remove all groups from a resource.")
    session = meta.Session()
    resource = get(resource_id)
    update_attributes = kwargs
    try:
        # set_entity_attributes returns the list of attribute names it changed.
        modified = model.set_entity_attributes(resource, update_attributes, encrypted_attributes=['notes'])
        session.flush()
    except:
        log.exception("Error updating resource.")
        raise
    gr_t = model.group_resources_table
    if group_ids is not None:
        # Only rewrite memberships if the requested set differs from current.
        if set(group_ids) != set([cg.id for cg in resource.groups]):
            try:
                # Replace all membership rows: delete then re-insert.
                session.execute(gr_t.delete(gr_t.c.resource_id==resource_id))
                for group_id in group_ids:
                    group_id = int(group_id)
                    gr = model.GroupResource()
                    gr.resource_id = resource.id
                    gr.group_id = group_id
                    session.add(gr)
                    session.flush()
            except:
                log.exception("Error adding group memberships")
                raise
            else:
                modified += ['group_ids']
    session.flush()
    return (resource, modified)
def delete(resource_id):
    """
    Delete the specified resource record from the database.

    :param resource_id: The ID of the resource to delete.
    :raise ensconce.exc.NoSuchEntity: If the resource does not exist.
    :return: The (now deleted) :class:`model.Resource` object.
    """
    session = meta.Session()
    try:
        resource = get(resource_id)
        session.delete(resource)
        session.flush()
    except Exception:
        log.exception("Error deleting resource: {0}".format(resource_id))
        raise
    return resource
|
UTF-8
|
Python
| false | false | 2,014 |
12,532,714,605,583 |
01dd168d113c2337c71697adf98112fe8f09d6f0
|
96f1b806b78ff063327be70e61fe292a13c2e85c
|
/IDS-and-A-Star/A-Star/graph.py
|
ca5cd293dff731b37ace236870b67b8a1e3aa15c
|
[
"MIT"
] |
permissive
|
aleksandar-mitrevski/ai
|
https://github.com/aleksandar-mitrevski/ai
|
0386e418ab5303c4348fc42c8a1b236bfadb59fa
|
b9c7e6da18110da2c6add6982cc9c4244b820f12
|
refs/heads/master
| 2020-05-16T20:42:31.724215 | 2014-02-03T23:08:21 | 2014-02-03T23:08:21 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import coordinates
import math
import matplotlib.pyplot as pyplot
class GraphNode(object):
    """A single graph vertex.

    Carries an integer label, an optional coordinates object (with x/y
    attributes), and the list of outgoing edges, which starts empty and is
    populated by Graph.add_edge.

    Author: Aleksandar Mitrevski
    """
    def __init__(self, label=-1, coordinates=None):
        """Initialize the node.

        Keyword arguments:
        label -- integer identifier for the node (default -1)
        coordinates -- 'Coordinates' object giving the node's x/y position (default None)
        """
        self.children = []
        self.label = label
        self.coordinates = coordinates
class GraphEdge(object):
    """A directed, weighted connection between two graph nodes.

    Author: Aleksandar Mitrevski
    """
    def __init__(self, connectedNode, cost):
        """Store the edge endpoint and weight.

        Keyword arguments:
        connectedNode -- label of the child node this edge points to
        cost -- edge weight (straight-line distance between the two nodes)
        """
        self.cost = cost
        self.connectedNode = connectedNode
class Graph(object):
    """Defines a graph data structure.

    Nodes are stored in a dict keyed by their labels; edges are directed and
    weighted by the straight-line (Euclidean) distance between the endpoint
    nodes' coordinates.

    Author: Aleksandar Mitrevski
    """
    def __init__(self):
        """Creates an empty graph."""
        self.nodes = dict()

    def add_node(self, node):
        """Adds a node to the graph (replacing any node with the same label).

        Keyword arguments:
        node -- A 'GraphNode' object.
        """
        self.nodes[node.label] = node

    def add_edge(self, parentNodeKey, childNodeKey):
        """Adds a directed edge between the nodes with labels
        'parentNodeKey' and 'childNodeKey'.

        Returns 'True' if the edge is added successfully.
        If the edge already exists, does not add it and returns 'False'.

        Keyword arguments:
        parentNodeKey -- Label of the parent node.
        childNodeKey -- Label of the child node.
        """
        # BUG FIX: the original tested `childNodeKey not in ...children`, but
        # 'children' holds GraphEdge objects rather than labels, so the
        # membership test never matched and duplicate edges were added.
        existing_children = [edge.connectedNode for edge in self.nodes[parentNodeKey].children]
        if childNodeKey in existing_children:
            return False
        cost = self._distance(parentNodeKey, childNodeKey)
        self.nodes[parentNodeKey].children.append(GraphEdge(childNodeKey, cost))
        return True

    def node_exists(self, nodeKey):
        """Returns 'True' if the node with key 'nodeKey' exists in the graph
        and 'False' otherwise.

        Keyword arguments:
        nodeKey -- A node key (an integer).
        """
        return nodeKey in self.nodes

    def visualize_graph(self, shortestPathNodes):
        """Visualizes the graph after finding a shortest path between two nodes.

        Keyword arguments:
        shortestPathNodes -- A list of labels of nodes that belong to the
            shortest path between two nodes, in path order.
        """
        pyplot.ion()
        # All nodes in red; start node in green, goal node in yellow.
        for key, value in self.nodes.items():
            pyplot.plot(value.coordinates.x, value.coordinates.y, 'ro')
        startCoordinates = self.nodes[shortestPathNodes[0]].coordinates
        goalCoordinates = self.nodes[shortestPathNodes[len(shortestPathNodes)-1]].coordinates
        pyplot.plot(startCoordinates.x, startCoordinates.y, 'go')
        pyplot.plot(goalCoordinates.x, goalCoordinates.y, 'yo')
        # Draw the path segments in blue.  (range, not xrange: works on
        # both Python 2 and Python 3.)
        for i in range(len(shortestPathNodes) - 1):
            coord1 = self.nodes[shortestPathNodes[i]].coordinates
            coord2 = self.nodes[shortestPathNodes[i+1]].coordinates
            pyplot.plot([coord1.x, coord2.x], [coord1.y, coord2.y], 'b-')
        pyplot.show()

    def _distance(self, node1, node2):
        """Returns the straight-line distance between two nodes.

        Keyword arguments:
        node1 -- Label of a node in the graph (an integer).
        node2 -- Label of another node in the graph (an integer).
        """
        node1Coordinates = self.nodes[node1].coordinates
        node2Coordinates = self.nodes[node2].coordinates
        return math.sqrt((node1Coordinates.x - node2Coordinates.x)**2 +
                         (node1Coordinates.y - node2Coordinates.y)**2)
|
UTF-8
|
Python
| false | false | 2,014 |
206,158,431,148 |
8675aeb0c6122b336e549c7bfda9e6ea6706d611
|
897eb2ba9488faa17614a7eeb48b1d840da28135
|
/import_hxl.py
|
9e2d644a0d8948c1e7c88fdfdb168a3ba2fedc31
|
[] |
no_license
|
djcronin/ImportHXL
|
https://github.com/djcronin/ImportHXL
|
eb1d78ad8710a8c6ac5fb0863e46a1b6453809ed
|
02ac30a1999524154e260128dd55470ad448925d
|
refs/heads/master
| 2018-05-18T16:28:46.768001 | 2014-07-16T21:55:58 | 2014-07-16T21:55:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ImportHXL
A QGIS plugin
Imports Humanitarian Exchange Language data in .csv format.
-------------------
begin : 2014-07-01
git sha : $Format:%H$
copyright : (C) 2014 by Daniel Cronin
email : ucescro@live.ucl.ac.uk
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# Initialize Qt resources from file resources.py
import resources_rc
from qgis.core import *
# Import the code for the dialog
from import_hxl_dialog import ImportHXLDialog
# print 'ImportHXLDialog.__path__: '+ str(ImportHXLDialog.__path__)
import os.path
import csv
import hxlParser
#ImportHXLDialog = ImportHXLDialog
#_browseCsvFile = ImportHXLDialog._browseCsvFile
#getFilename = ImportHXLDialog.getFilename
class ImportHXL:
"""QGIS Plugin Implementation."""
    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        self.dlg = ImportHXLDialog()
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale (first two letters of the QGIS user locale)
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'ImportHXL_{}.qm'.format(locale))
        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)
        # Declare instance attributes
        self.actions = []  # QActions created by add_action()
        self.menu = self.tr(u'&HXL .csv importer')
        # TODO: We are going to let the user set this up in a future iteration
        self.toolbar = self.iface.addToolBar(u'ImportHXL')
        self.toolbar.setObjectName(u'ImportHXL')
    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString
        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('ImportHXL', message)
    def add_action(
        self,
        icon_path,
        text,
        callback,
        enabled_flag=True,
        add_to_menu=True,
        add_to_toolbar=True,
        status_tip=None,
        whats_this=None,
        parent=None):
        """Add a toolbar icon to the plugin toolbar.

        (Docstring boilerplate originally referred to the "InaSAFE toolbar";
        this adds to this plugin's own toolbar and plugin menu.)

        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str
        :param text: Text that should be shown in menu items for this action.
        :type text: str
        :param callback: Function to be called when the action is triggered.
        :type callback: function
        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool
        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool
        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool
        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str
        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.
        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget
        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)
        if status_tip is not None:
            action.setStatusTip(status_tip)
        if whats_this is not None:
            action.setWhatsThis(whats_this)
        if add_to_toolbar:
            self.toolbar.addAction(action)
        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)
        self.actions.append(action)
        return action
    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path = ':/plugins/ImportHXL/icon.png'
        self.add_action(
            icon_path,
            text=self.tr(u'Import HXL .csv'),
            callback=self.run,
            parent=self.iface.mainWindow())
        # Wire the "write file" checkbox so toggling it calls changeActive();
        # the message box below reports whether the connection succeeded.
        result = QObject.connect(self.dlg.ui.writeFileCheck,SIGNAL("stateChanged(int)"),self.changeActive)
        QMessageBox.information(None, 'Connection result', "Connect returned %s" % result)
    def changeActive(self, state):
        # Toggle the canvas-click handler based on the checkbox state.
        # NOTE(review): self.clickTool is never assigned anywhere in this
        # class, so these connect/disconnect calls look like they would raise
        # AttributeError when triggered -- confirm against the full plugin.
        if (state==Qt.Checked):
            QObject.connect(self.clickTool, SIGNAL("canvasClicked(const QgsPoint &, Qt::MouseButton)"), self.handleMouseDown)
            print 'box checked'
        else:
            QObject.disconnect(self.clickTool, SIGNAL("canvasClicked(const QgsPoint &, Qt::MouseButton)"), self.handleMouseDown)
    def handleMouseDown(self):
        # Debug handler for canvas clicks; only prints a marker message.
        print 'box checked'
    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginMenu(
                self.tr(u'&HXL .csv importer'),
                action)
            self.iface.removeToolBarIcon(action)
    def run(self):
        """Run method that performs all the real work.

        Shows the import dialog; on OK, parses the selected HXL .csv file.
        The first row must contain HXL codes (validated against
        hxlParser.vocabulary); each subsequent row becomes one in-memory
        QGIS vector layer whose geometry comes from the 'loc.wkt' column
        and whose CRS comes from 'loc.srid'.
        """
        # show the dialog
        self.dlg.show()
        # Run the dialog event loop
        result = self.dlg.exec_()
        # See if OK was pressed
        if result:
            # Get file path from interface dialog and initiate .csv reader:
            with open(str(self.dlg.filename), 'rb') as csvfile:
                reader = csv.reader(csvfile, delimiter=',', quotechar='"') # quotechar changed from / to "
                header = True
                headersAreValid = True
                invalidHeaders = []
                attributeFields = []
                n = 0 # total number of codes
                nG = 0 # number of geo codes
                nGindex = [] # column indices of geo codes
                # NOTE(review): computed before n/nG are filled, so always 0
                # and never used afterwards.
                nAtt = n - nG # number of attribute fields
                for row in reader:
                    if header == True:
                        for code in row:
                            # Check that headers are all valid HXL codes:
                            if code in hxlParser.vocabulary:
                                pass
                            else:
                                headersAreValid = False
                                invalidHeaders.append(str(code))
                            # Determine column index and number of geo fields:
                            if code == 'loc.wkt':
                                wktColumn = n
                                nGindex.append(n)
                                nG += 1
                            elif code == 'loc.srid':
                                sridColumn = n
                                nGindex.append(n)
                                nG += 1
                            elif code == 'lat.deg':
                                latDegColumn = n
                                nGindex.append(n)
                                nG += 1
                            elif code == 'lon.deg':
                                lonDegColumn = n
                                nGindex.append(n)
                                nG += 1
                            else:
                                attributeFields.append(code)
                            n += 1
                        header = False
                    elif headersAreValid == False:
                        # If invalid headers are used, the user is informed
                        # and the import is terminated:
                        QMessageBox.critical(self.iface.mainWindow(),"Error","This file contains invalid headers:"+str(invalidHeaders))
                        break
                    else:
                        # Construct a list of strings containing HXL values
                        # and concatenate a URI string, which carries the
                        # details for layer creation:
                        values = []
                        for code in row:
                            values.append(code)
                        uri = self.dlg.filename + '?delimiter=%s&wktField=%s'
                        # Get non-geometry attribute values for inclusion in
                        # the attribute table.  NOTE(review): this loop reuses
                        # 'n' as its index, clobbering the header column count.
                        nonGeomAttributes = []
                        for n in range(len(values)):
                            if n in nGindex:
                                pass
                            else:
                                nonGeomAttributes.append(values[n])
                        # Locate EPSG:SRID code and append to URI string:
                        if sridColumn >= 0:
                            uri += '&crs=EPSG:' + values[sridColumn]
                        else:
                            pass
                        # Append the appropriate number of attributes to URI string:
                        uriFields = ''
                        for field in attributeFields:
                            uriFields += '&field=' + str(hxlParser.englishHXL[field]) + ':string(255)'
                        # Determine geometry type from WKT column and add layer to map:
                        if values[wktColumn][0:5].lower() == 'point':
                            layer = QgsVectorLayer('Point?crs=EPSG:'+values[sridColumn]+uriFields
                                                   +'&spatialIndex=yes&subsetIndex=yes&watchFile=no',
                                                   'pointtest', 'memory')
                            pr = layer.dataProvider()
                            fet = QgsFeature()
                            fet.setGeometry(QgsGeometry.fromWkt(values[wktColumn]))
                            fet.setAttributes(nonGeomAttributes)
                            QgsMapLayerRegistry.instance().addMapLayer(layer)
                            pr.addFeatures([fet])
                            layer.updateExtents()
                        elif values[wktColumn][0:4].lower() == 'line':
                            layer = QgsVectorLayer('Linestring?crs=EPSG:'+values[sridColumn]+uriFields
                                                   +'&spatialIndex=yes&subsetIndex=yes&watchFile=no',
                                                   'linetest', 'memory')
                            pr = layer.dataProvider()
                            fet = QgsFeature()
                            fet.setGeometry(QgsGeometry.fromWkt(values[wktColumn]))
                            fet.setAttributes(nonGeomAttributes)
                            QgsMapLayerRegistry.instance().addMapLayer(layer)
                            pr.addFeatures([fet])
                            layer.updateExtents()
                        elif values[wktColumn][0:4].lower() == 'poly':
                            layer = QgsVectorLayer('Polygon?crs=EPSG:'+values[sridColumn]+uriFields
                                                   +'&spatialIndex=yes&subsetIndex=yes&watchFile=no',
                                                   'polytest', 'memory')
                            pr = layer.dataProvider()
                            fet = QgsFeature()
                            fet.setGeometry(QgsGeometry.fromWkt(values[wktColumn]))
                            fet.setAttributes(nonGeomAttributes)
                            QgsMapLayerRegistry.instance().addMapLayer(layer)
                            pr.addFeatures([fet])
                            layer.updateExtents()
                        else:
                            pass
        else:
            pass
''' To do next: Write parser, get attributes of spreadsheet as values and call parser function
Done 06/07
'''
''' To do next: Get attribute fields from spreadsheet and translate codes to plain English headings
Done 07/07
'''
''' To do next:
Allow naming of layer/layers (default based on input file),
select feature per line/per table,
choose layer type e.g. shapefile & save to disc,
set up multipoint/multiline/multipolygon & geomcollection (?)
'''
|
UTF-8
|
Python
| false | false | 2,014 |
3,951,369,959,930 |
2eb09d79d6c74d44711d8239898470fddb2d3894
|
57bad21460600a3f39ccf72435634226d684da17
|
/tmiddleware/__init__.py
|
1dfaab4354722d8973c159159f2cf4759225e625
|
[] |
no_license
|
iamsk/tmiddleware
|
https://github.com/iamsk/tmiddleware
|
f4c9a26aa8b2d0011749d4d83e2965920188e78b
|
3ce38071f8625f96fd56ab950051d76aacd126f4
|
refs/heads/master
| 2020-06-06T12:35:20.147713 | 2013-11-30T06:52:10 | 2013-11-30T06:52:10 | 10,245,028 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding: utf-8 -*-
import logging
from tornado.options import options
class TMiddleware():
    """Dispatcher that loads the middleware classes named in
    ``options.middlewares`` and runs their request/response hooks around a
    Tornado request handler.

    A middleware class opts into each phase simply by defining the
    corresponding method: ``request_hook()``, ``before_response_hook(chunk)``,
    or ``after_response_hook()``.
    """
    def __init__(self, handler):
        # handler: the Tornado RequestHandler instance the middlewares wrap.
        self.handler = handler
        self.request_middlewares = []
        self.before_response_middlewares = []
        self.after_response_middlewares = []
        self.init()
    def init(self):
        """Import each configured middleware class, instantiate it with the
        handler, and register it for whichever hook phases it implements."""
        if hasattr(options, 'middlewares') and len(options.middlewares) > 0:
            for mclass in options.middlewares:
                modname, clsname = self._split_name(mclass)
                try:
                    mod = __import__(modname, globals(), locals(), [clsname])
                except ImportError, e:
                    # Skip unimportable middleware modules instead of failing.
                    logging.error("module __import__ failed: {0}".format(e), exc_info=True)
                    continue
                try:
                    cls = getattr(mod, clsname)
                    inst = cls(self.handler)
                    # Register the instance for each phase it defines a hook for.
                    if hasattr(inst, 'request_hook'):
                        self.request_middlewares.append(inst)
                    if hasattr(inst, 'before_response_hook'):
                        self.before_response_middlewares.append(inst)
                    if hasattr(inst, 'after_response_hook'):
                        self.after_response_middlewares.append(inst)
                except AttributeError, e:
                    logging.error("cant instantiate cls: {0}".format(e), exc_info=True)
                    print "cant instantiate cls", e
    def _run_hooks(self, type, middlewares, chunk=None):
        # Run one phase's hooks in registration order.  Exceptions are logged
        # but never propagate, so one failing middleware cannot break the
        # request.
        for middleware in middlewares:
            try:
                if type == 'request':
                    middleware.request_hook()
                if type == 'before_response':
                    middleware.before_response_hook(chunk)
                if type == 'after_response':
                    middleware.after_response_hook()
            except Exception as e:
                logging.error(e, exc_info=True)
    def request_hooks(self):
        """
        Executed in prepare() of the Request, as before http method
        """
        self._run_hooks('request', self.request_middlewares)
    def before_response_hooks(self, chunk=None):
        """
        Executed in finish() of the Request, as after http method
        """
        self._run_hooks('before_response', self.before_response_middlewares, chunk)
    def after_response_hooks(self):
        """
        Executed in on_finish(), as after finish() of the Request
        Useful for logging
        """
        self._run_hooks('after_response', self.after_response_middlewares)
    def _split_name(self, path):
        # Split a dotted path "pkg.mod.Class" into ("pkg.mod", "Class").
        try:
            pos = path.rindex('.')
        except ValueError:
            raise Exception('%s is invalid' % path)
        return path[:pos], path[pos + 1:]
|
UTF-8
|
Python
| false | false | 2,013 |
3,066,606,669,488 |
95c04c6e4b4993dbe33044df1ce5efd92f7f2560
|
bd1dbac70e51ccbcc6217bcc2010de61459ad229
|
/ex5.py
|
148204c00295e482293cbc9ff265eba8b66fcb9b
|
[] |
no_license
|
crowley101/PyPractice
|
https://github.com/crowley101/PyPractice
|
b7882093902cbc8e823ee45ef002148fba39c917
|
14b6abfeb045f611f8bb1bd85837e32c999b19f3
|
refs/heads/master
| 2016-09-06T04:52:03.428772 | 2014-11-04T21:24:20 | 2014-11-04T21:24:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Variable / format-string practice script (Learn Python the Hard Way, ex5).
# The print statements were rewritten in the parenthesized single-argument
# form, which prints identically under Python 2 and is also valid Python 3.
name = 'Kevin E. Yeo'
age = 35
height = 195 # cm
weight = 120 # kg
eyes = 'Brown'
teeth = 'White'
hair = 'Blonde'

print("Let's talk about %r." % name)
print("I'm %s centimeters tall." % height)
print("I'm %d kilo grams heavy." % weight)
print("Actually that's pretty heavy.")
print("He's got %s eyes and %s hair." % (eyes, hair))
print("His teeth are usually %s depending on the coffee" % teeth)
print("If I add %d, %d, and %d I get %d." % (age, height, weight, age + height + weight))
|
UTF-8
|
Python
| false | false | 2,014 |
10,170,482,571,904 |
e30e1984d7535ad5d6ae00449657a35d564490bc
|
8496a967452c4fa48ef0dd7acd50352d75cf8b96
|
/mine1.py
|
6c765da7e8f573930d6272a6ed0949a6f484fa61
|
[] |
no_license
|
aravindnatarajan/InsightProject
|
https://github.com/aravindnatarajan/InsightProject
|
abfc0eb6ee66cd05c481b7147048bb9cb551af6a
|
3f5b2faa2860ddcda8208adc2ade3cb186303237
|
refs/heads/master
| 2016-09-15T23:12:22.206532 | 2014-07-10T02:38:12 | 2014-07-10T02:38:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from bs4 import BeautifulSoup
import os
import sys
import re
from urllib2 import urlopen
def revstr(a):
    """Return the characters of *a* in reverse order.

    Uses the idiomatic reversed-slice, which is O(n), instead of the
    original character-by-character string concatenation (quadratic).
    """
    return a[::-1]
path = "http://www.goodreads.com"
myUrl1 = "http://www.goodreads.com/genres/pre-k"
myUrl2 = "http://www.goodreads.com/genres/new_releases/childrens"
def getListOfBooks(myUrl):
    """Scrape a Goodreads genre page and return absolute URLs of listed books.

    Keeps every anchor tag whose href begins with '/book/show/' and prefixes
    the extracted href with the site root ('path', a module-level constant).
    """
    soup = BeautifulSoup(urlopen(myUrl).read())
    # Keep only anchors that link to an individual book page.
    links = [str(val) for val in soup.find_all('a') if '<a href="/book/show/' in str(val)]
    books = []
    for link in links:
        ss = ""
        # link[9:] skips the literal '<a href="'; accumulate characters up to
        # the closing quote to recover the relative URL.
        for l in link[9:]:
            if l == '"': break
            ss += l
        books.append(path+ss)
    return books
bookUrls = getListOfBooks(myUrl1)
bookUrls += getListOfBooks(myUrl2)
#for book in bookUrls: print book
#sys.exit()
# <span id="freeTextContainer723150042311518910">Laszlo is afraid of the dark. The dark is not afraid of Laszlo. <br><br>Laszlo lives in a house. The dark lives in the basement. <br>$
def getDescription(myUrl):
    """Scrape the free-text description from a Goodreads book page.

    Returns the cleaned description text followed by a newline, or '' when
    the 'readable stacked' description container is not found on the page.
    """
    allWords = str(urlopen(myUrl).read()).split()
    subset = ""
    found = False
    for i in range(0,len(allWords)):
        # The description appears to live in a span inside a
        # 'readable stacked' container -- TODO confirm against page markup.
        if "readable" in allWords[i] and "stacked" in allWords[i+1]:
            found = True
            for j in range(i+4,i+100):
                if "</span>" in allWords[j]: break;
                if ("<br><br>" in allWords[j]) or ("id=" in allWords[j]):
                    # Strip trailing markup: keep only the text after the
                    # last '>' (built reversed, then flipped back).
                    ss = ""
                    for lp in range(len(allWords[j])-1,-1,-1):
                        if allWords[j][lp] == '>': break
                        ss += allWords[j][lp]
                    allWords[j] = revstr(ss)
                subset += allWords[j] + " "
    if not found: return ""
    # Trim the closing tag from the word where the inner loop stopped
    # (j still holds that index).
    ss = ""
    for lp in range(0,len(allWords[j])):
        if allWords[j][lp] == '<': break
        ss += allWords[j][lp]
    subset += ss
    listWords = subset.split()
    # Drop any residual markup-looking tokens.
    ss = ""
    for word in listWords:
        if "#" in word: continue
        if ">" in word: continue
        if "<" in word: continue
        if "/" in word: continue
        if "onclick" in word: continue
        if "class=" in word: continue
        ss += (word + " ")
    return ss+'\n'
# Fetch each book page and append its description (one per line, blank line
# separated) to the local file "data".
outf = open("data", "w")
for bookUrl in bookUrls:
    print "Getting description for: " + bookUrl
    outf.write(getDescription(bookUrl))
    outf.write("\n")
outf.close()
#myUrl = "http://www.goodreads.com/book/show/15790852-the-dark"
#print getDescription(myUrl)
|
UTF-8
|
Python
| false | false | 2,014 |
12,214,887,029,763 |
1a5cfd30eaf2832eb42924b6c5d0d9c2c83917bf
|
efee082735bc865d01911ec70a4de97b7a5f0b38
|
/test_scripts/test_model.py
|
840816e57a376b9e1c0ad81c2bce0cc8051cd7fd
|
[] |
no_license
|
LukasHehn/pyWIMP
|
https://github.com/LukasHehn/pyWIMP
|
075b9c87014d85cd302f27f1a0668166e2a35ffb
|
8788895e3dcc62020d0586185b1c454c19aa5666
|
refs/heads/master
| 2021-01-21T00:38:30.599214 | 2012-06-13T11:04:15 | 2012-06-13T11:04:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pyWIMP.DMModels.wimp_model as wimp_model
import pyWIMP.DMModels.base_model as base_model
import ROOT
import sys
#ROOT.gROOT.SetBatch()
# Ad-hoc test script (Python 2): build two WIMP signal models (10 GeV and
# 7 GeV masses) on shared time/energy observables, compare their expected
# event counts, plot both spectra, then fit one model to toy data.
basevars = base_model.BaseVariables(0, 1./365., 0, 100)
time = basevars.get_time()
energy = basevars.get_energy()
# Freeze the time observable at t = 0.
time.setVal(0)
time.setConstant(True)
# Shared free normalisation parameter for the models.
model_normal = ROOT.RooRealVar("model_normal", \
                               "model_normal", \
                               1, 0, 100000)
print model_normal.getVal()
wm = wimp_model.WIMPModel(basevars, mass_of_wimp=10, kilograms=1)
wm2 = wimp_model.WIMPModel(basevars, mass_of_wimp=7, kilograms=1)
c1 = ROOT.TCanvas()
model = wm.get_model()
# NOTE(review): the two get_simple_model() results are immediately
# overwritten by get_model() on the next two lines, so they are unused.
model_extend = wm.get_simple_model()
model_extend2 = wm2.get_simple_model()
model_extend = wm.get_model()
model_extend2 = wm2.get_model()
print model_extend.getNorm(0)
# Expected events, also scaled by 0.751/0.561 — presumably an efficiency
# or exposure ratio; TODO confirm what this ratio represents.
print "Model extend: ", model_extend.expectedEvents(ROOT.RooArgSet(energy))
print "Model extend, ratio: ", model_extend.expectedEvents(ROOT.RooArgSet(energy))*(0.751/0.561)
print "Model extend: ", model_extend2.expectedEvents(ROOT.RooArgSet(energy))
print "Model extend, ratio: ", model_extend2.expectedEvents(ROOT.RooArgSet(energy))*(0.751/0.561)
# Overlay both spectra on an energy frame and wait for the user.
frame = energy.frame()
model_extend.plotOn(frame)
model_extend2.plotOn(frame)
frame.Draw()
c1.Update()
raw_input("E")
# Rescale the shared normalisation so each model corresponds to 1e-4
# (units set by get_normalization — TODO confirm) and dump pdf values
# on a 0.1-spaced energy grid.
norm_value = wm.get_normalization().getVal()
model_normal.setVal(1e-4/norm_value)
print 1e-4/norm_value
for i in range(50):
    energy.setVal(i*0.1)
    print i*0.1, model_extend.getVal()
norm_value = wm2.get_normalization().getVal()
model_normal.setVal(1e-4/norm_value)
for i in range(50):
    energy.setVal(i*0.1)
    print i*0.1, model_extend2.getVal()
print norm_value
integral = model_extend.createIntegral(ROOT.RooArgSet(energy))
value = integral.getVal()
number_of_counts = 0
integral = model.createIntegral(ROOT.RooArgSet(energy))
print integral.getVal()
# Generate 1000 toy events from the plain model and fit the extended one.
data = model.generate(ROOT.RooArgSet(energy), 1000)
model_extend.fitTo(data)
data.plotOn(frame)
model_extend.plotOn(frame)
frame.Draw()
c1.Update()
print wm.get_normalization().getVal()
print integral.getVal()*model_normal.getVal()
raw_input("E")
|
UTF-8
|
Python
| false | false | 2,012 |
10,316,511,452,481 |
f8a1346fa3d4397286b64aacf89654203d967b28
|
7ca4dc97632cacc4bd3ff9cae6080ee4186b9905
|
/src/Week4/class-test-1.py
|
4219b51beee7ed34be694650e47080113ce900a7
|
[
"Apache-2.0"
] |
permissive
|
yddong/Py3L
|
https://github.com/yddong/Py3L
|
c9d7a511fc1ac0b5e2aed23c7c3caeceed315c73
|
b11d66174e9b5a9e4c372cdd5fa413a790aeb00c
|
refs/heads/master
| 2020-12-25T17:14:42.180918 | 2013-07-18T18:35:58 | 2013-07-18T18:35:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import re
class Grammar:
    """A context-free grammar stored as two mirrored lookup tables:
    LHS symbol -> list of RHS tuples, and RHS tuple -> list of LHS symbols.
    """

    def __init__(self, filename=None):
        """Create an empty grammar, optionally loading rules from a file.

        Bug fix: the original declared `lhs`/`rhs` as *class* attributes,
        so every Grammar instance shared one rule set and rules leaked
        across instances. They are now per-instance.
        """
        self.lhs = {}   # lhs symbol -> list of RHS tuples
        self.rhs = {}   # RHS tuple  -> list of lhs symbols
        if filename:
            self.readFile(filename)

    def getRHS(self, lhs):
        "Returns the list of RHS for a given LHS."
        return self.lhs.get(lhs, [])

    def getLHS(self, rhs):
        "Returns the list of LHS symbols that can produce the RHS tuple."
        return self.rhs.get(rhs, [])

    def addRule(self, newlhs, newrhs):
        "Register the production newlhs -> newrhs in both lookup tables."
        self.lhs.setdefault(newlhs, []).append(newrhs)
        self.rhs.setdefault(newrhs, []).append(newlhs)

    def readFile(self, filename):
        """Load rules of the form 'LHS -> SYM SYM ...' (optionally followed
        by a #comment) from a UTF-8 text file.

        Kept from the original: failures are swallowed, leaving the grammar
        empty (best-effort load). The file is now closed even on error.
        """
        try:
            with open(filename, encoding="utf-8", mode='r') as inpfile:
                for line in inpfile:
                    match = re.search(r"(?P<lhs>\w+)\s*->\s*(?P<rhs>\w+(\s+\w+)*)(#.*)?", line)
                    if match:
                        self.addRule(match.group("lhs"),
                                     tuple(match.group("rhs").split()))
        except Exception:
            # best-effort: a missing/unreadable grammar file is ignored
            pass

    def printGrammar(self):
        "Print every rule as 'LHS -> SYM SYM ...'."
        for x in self.lhs:
            for y in self.lhs[x]:
                print(x, "->", " ".join(y))
class ProbGrammar(Grammar):
    """Placeholder for a probabilistic grammar; probability estimation is
    not implemented yet."""
    def countProbs(self, rules):
        # TODO: estimate rule probabilities from observed rule counts.
        pass
class Parser:
    """Naive bottom-up replace-and-enqueue parser driven by a Grammar.

    NOTE(review): `grammar` is a *class* attribute, so all Parser
    instances share a single Grammar (and its rule tables).
    """
    grammar = Parser.grammar if False else Grammar()  # see note below
    def parse(self, sentence):
        """Repeatedly replace any token subsequence matching a rule's RHS
        with its LHS, printing each step; exploratory only — nothing is
        returned and no parse tree is built."""
        # tokens = tuple(sentence.split())
        agenda = [ tuple(sentence.split()) ]
        while agenda:
            tokens = tuple(agenda.pop())
            # Try every contiguous subsequence (start x, length y).
            for x in range(len(tokens)):
                for y in range(1, len(tokens) - x + 1):
                    sequence = tokens[x:x+y]
                    print("sequence:", sequence)
                    replacementlist = self.grammar.getLHS( sequence )
                    print("replacementlist:", replacementlist)
                    if replacementlist:
                        # Only the first matching LHS is explored.
                        replacement = list(tokens[:])
                        replacement[x:x+y] = [ replacementlist[0] ]
                        print("Adding to agenda:", replacement)
                        agenda.append(replacement)
            print("Agenda", agenda)
# Demo: load grammar1.txt into the (class-shared) grammar and parse a
# sample sentence.
myparser = Parser()
myparser.grammar.readFile("grammar1.txt")
# parse "John loves Mary" is "N V N"
myparser.parse("John loves Mary")
#mygrammar = Grammar("grammar1.txt")
#mygrammar.readFile("grammar1.txt")
#mygrammar.printGrammar()
#mygrammar.addRule("S", ("NP", "VP") )
#print(mygrammar.getRHS("S") , mygrammar.getLHS( ("NP", "VP") ))
|
UTF-8
|
Python
| false | false | 2,013 |
17,171,279,269,899 |
882e4f92afe992ce2af9aa82135089b903758494
|
0465a45cc087179ee7365959124a5fd59ab90428
|
/part1.py
|
9069bca8a15c96f1f53db849b96aaf6b43f599cf
|
[] |
no_license
|
ejetzer/Lab-Analysis-2013
|
https://github.com/ejetzer/Lab-Analysis-2013
|
7a35efdcf460286a0303ca0d3cbb136078aefa4d
|
08555e40732244fe319551b9d659e8bf5d0396dd
|
refs/heads/master
| 2015-08-11T03:17:17.794553 | 2014-04-06T16:38:37 | 2014-04-06T16:38:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from uncertainties import *
from graphs import plot_data
from shared import latex_output, text_output, csv_output, shelf_output
# Boiling point of liquid nitrogen in kelvin, as (value, uncertainty).
liquid_N_temp = (76.4, 0.1)
# Specific heat capacity of aluminium sampled every 5 K starting at 75 K
# (units presumably J/(g*K) — TODO confirm against the lab manual).
heat_capacity_Al = [0.35, 0.375, 0.4, 0.425, 0.45, 0.475, 0.5, 0.525, 0.55, 0.56, 0.575, 0.6, 0.61, 0.625, 0.64, 0.65, 0.66, 0.675, 0.675, 0.7, 0.7, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.85, 0.86, 0.86, 0.87, 0.87, 0.88, 0.88, 0.89, 0.89]
temp = 75
temperature_interval = 5
# Pre-integrate: each sample becomes (T, c*dT), so integrating c dT over a
# temperature range reduces to summing the second components in range.
cT_graph = []
for c in heat_capacity_Al:
    integrated_c = c * temperature_interval
    cT_graph.append((temp, integrated_c))
    temp += temperature_interval
def unprecise_integral(graph, x_range):
    """Approximate the integral of a sampled graph over x_range.

    graph: iterable of (x, y) pairs where each y is already pre-integrated
        over one step (e.g. c*dT), so the integral is just the sum of the
        y values whose x falls inside the range.
    x_range: [(low, low_unc), (high, high_unc)] bounds with uncertainties.

    Returns (approximation, 0.2) — 0.2 is a fixed uncertainty estimate.

    Bug fix: the original shifted the bounds with `x[0] + (-x[1])**i`,
    but (-x[1])**0 == 1 for any uncertainty, so the lower bound was
    always moved by a constant +1 instead of by its uncertainty. The
    intent (narrow the interval conservatively by each bound's
    uncertainty) is implemented directly below.
    """
    low = x_range[0][0] + x_range[0][1]
    high = x_range[1][0] - x_range[1][1]
    approximation = 0
    for x, y in graph:
        if low <= x <= high:
            approximation += y
    return approximation, 0.2
def heat_exchanged1(mass_Al, c_Al, T_range):
    """Heat exchanged by an aluminium sample over T_range, as
    mass * integral(c dT). `product` is imported from the project's
    `uncertainties` helpers and propagates (value, error) pairs."""
    return product(mass_Al, unprecise_integral(c_Al, T_range))
def latent_heat1(mass_change, mass_Al, c_Al, T_range):
    """Latent heat: heat released by the cooling aluminium divided by the
    mass of nitrogen boiled off (`mass_change`), with error propagation
    via the project's `quotient` helper."""
    return quotient(heat_exchanged1(mass_Al, c_Al, T_range), mass_change)
def do1(runs, mass, temps):
    """Compute the latent heat (method 1) for each run.

    runs/temps: parallel sequences of run data and initial temperatures;
    mass: (value, error) of the aluminium sample.
    Returns a list of (value, error) latent heats, one per run.
    """
    L1s = []
    # plot_data fits each run and returns per-run slopes/offsets; index 0
    # and index 2 select two linear sections of the fit — presumably the
    # final and initial segments; confirm against graphs.plot_data.
    slopes, offsets = plot_data(runs, mass[0],
                                latexstuff={'title': 'Method 1, mass {}, run{}',
                                            'xlabel': 'Time (s)',
                                            'ylabel': 'Mass change (g)'})
    for run, temp, slope, offset in zip(runs, temps, slopes, offsets):
        print(slope[0])
        print(slope[2])
        # Extrapolate both fitted lines to t = 300 s; their difference is
        # the nitrogen mass boiled off by the sample.
        last = somme(product(slope[0], (300, 1)), offset[0])
        first = somme(product(slope[2], (300, 1)), offset[2])
        diff = difference(last, first)
        L1 = latent_heat1(diff, mass, cT_graph,
                          [liquid_N_temp, temp])
        L1s.append(L1)
    return L1s
|
UTF-8
|
Python
| false | false | 2,014 |
14,431,090,133,272 |
b7bd575894fe3e1c7ea2b230bc1d1e7239ecbff3
|
ebae1bef3243fd71b85d77d2b21724485e72a723
|
/dber/sms/models.py
|
95df3de6cb22ec447d71bceedbd3b1a58701f95e
|
[] |
no_license
|
ochmaa/blackhole
|
https://github.com/ochmaa/blackhole
|
570abfcb422de21aa2aea1c5d8f62e195378200a
|
0cea22c610c219afedd9496ed5d239d04f99df73
|
refs/heads/master
| 2016-09-05T17:07:31.173748 | 2011-05-27T05:48:09 | 2011-05-27T05:48:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class Plan(models.Model):
    """Subscription plan: a tarif (price/credit amount) plus an inbox
    message quota (-1 meaning unlimited)."""
    name = models.CharField(max_length=200)
    tarif = models.IntegerField()
    inbox_count = models.IntegerField(default=1000, choices = (
        ( -1, 'Unlimited'),
        ( 1000, '1000'),
        (2000, '2000')))
    def __unicode__(self):
        return self.name
class AppUser(models.Model):
    """Application user: a Django auth User plus a messaging Plan, a
    remaining-cash balance and an inbox quota (Python 2 / Django 1.x)."""
    user = models.ForeignKey(User)
    plan = models.ForeignKey(Plan)
    remcash = models.IntegerField(default=15000)     # remaining credit
    inbox_count = models.IntegerField(default=1000)  # remaining inbox slots

    def rules(self):
        """All keyword/redirect rules owned by this user."""
        return self.rule_set.all()

    def RM(self):
        """Received messages: total inbox entries."""
        return self.userinbox_set.count()

    def UNR(self):
        """Unread received messages."""
        return self.userinbox_set.filter(message__unread=True).count()

    def RMP(self):
        """Percentage of received messages still unread (0 if none)."""
        if self.RM() == 0:
            return 0
        return float(self.UNR()) / float(self.RM()) * 100

    def SM(self):
        """Sent messages: total outbox entries."""
        return self.useroutbox_set.count()

    def USM(self):
        """Sent messages whose Message is still flagged unread."""
        return self.useroutbox_set.filter(message__unread=True).count()

    def SMP(self):
        """Percentage of sent messages still unread (0 if none)."""
        if self.SM() == 0:
            return 0
        return float(self.USM()) / float(self.SM()) * 100

    def RCP(self):
        """Remaining cash as a percentage of the plan tarif."""
        return float(self.remcash) / float(self.plan.tarif) * 100

    def __unicode__(self):
        return self.user.username

    def virgin(self):
        """Reset balance and inbox quota to the plan defaults."""
        self.remcash = self.plan.tarif
        self.inbox_count = self.plan.inbox_count
        self.save()

    def send_message_increment(self, phone, content):
        """Record an outgoing message and charge the tarif of every
        operator whose prefix list contains the number's 2-digit prefix.

        Cleanup: removed an unused `filter(prefixes__contains=...)`
        queryset that issued an extra DB query without being read.
        """
        msgobj = Message.objects.create(phone=phone, content=content)
        UserOutbox.objects.create(message=msgobj, user=self)
        pref = phone[:2]
        for telop in TelephoneOperator.objects.all():
            if pref in telop.prefixes.split(','):
                self.remcash = self.remcash - telop.tarif
                self.save()

    def cant_recieve(self):  # (sic) name kept for callers
        """True when the inbox quota is exhausted."""
        return self.inbox_count == 0

    def cant_reach_money(self):
        """True when the balance cannot cover the cheapest operator tarif.

        Bug fix: the original compared the int balance against the *list*
        of tarifs (`self.remcash < [t1, t2, ...]`), which under Python 2's
        cross-type ordering is always True. With no operators defined no
        message can be afforded, so True is returned in that case too.
        """
        tarifs = [op.tarif for op in TelephoneOperator.objects.all()]
        return not tarifs or self.remcash < min(tarifs)

    def decrement_inbox(self):
        """Consume one inbox slot."""
        self.inbox_count = self.inbox_count - 1
        self.save()
class TelephoneOperator(models.Model):
    """Mobile operator: comma-separated phone prefixes (matched against a
    number's first two digits by AppUser.send_message_increment) and a
    per-message tarif."""
    name = models.CharField(max_length=200)
    prefixes = models.CharField(max_length=255)
    tarif = models.IntegerField()
    def __unicode__(self):
        return self.name
class Rule(models.Model):
    """Keyword-to-URL redirect rule owned by an AppUser."""
    keyword = models.CharField(max_length=20)
    redirect_url = models.URLField()
    user = models.ForeignKey(AppUser)
    def __unicode__(self):
        return self.keyword
class Message(models.Model):
    """An SMS: phone number, 150-char body, auto timestamp and unread flag."""
    phone = models.CharField(max_length=20)
    content = models.CharField(max_length=150)
    # auto_now updates the timestamp on every save ("ognoo" — presumably
    # Mongolian for date; TODO confirm).
    ognoo = models.DateTimeField(auto_now=True)
    unread = models.BooleanField(default=True)
    class Meta:
        ordering= ['ognoo','unread']
class UserInbox(models.Model):
    """Link table: a received Message in an AppUser's inbox.

    save() silently drops the row (printing a warning) when the user's
    inbox quota is exhausted; otherwise it saves and consumes one slot.
    """
    user = models.ForeignKey(AppUser)
    message = models.ForeignKey(Message)
    def save(self, *args, **kwargs):
        if self.user.cant_recieve():  # (sic) quota check
            print 'your inbox is full'
        else:
            super(UserInbox,self).save(*args,**kwargs)
            self.user.decrement_inbox()
class UserOutbox(models.Model):
    """Link table: a Message sent by an AppUser."""
    user = models.ForeignKey(AppUser)
    message = models.ForeignKey(Message)
|
UTF-8
|
Python
| false | false | 2,011 |
5,265,629,938,158 |
af40b92334c050f90545277456f7930c26e4ad0d
|
47859b35c81b0a11c837b05c50b6592f68fc7a70
|
/tools/file/init.py
|
b8e29b03d0f0807700d8d8a828980d6b37f019db
|
[] |
no_license
|
ict4eo/eo4vistrails
|
https://github.com/ict4eo/eo4vistrails
|
be084482c6349b10dea9c16056baada2a713cb27
|
39d1e2905f16f58a5e5484ae682af37dd770b10e
|
refs/heads/master
| 2016-08-05T01:56:59.980966 | 2014-05-09T07:30:02 | 2014-05-09T07:30:02 | 17,738,355 | 2 | 0 | null | false | 2014-05-09T07:30:03 | 2014-03-14T07:17:22 | 2014-05-09T07:30:02 | 2014-05-09T07:30:02 | 7,048 | 1 | 1 | 1 |
Python
| null | null |
# -*- coding: utf-8 -*-
############################################################################
###
### Copyright (C) 2010 CSIR Meraka Institute. All rights reserved.
###
### This full package extends VisTrails, providing GIS/Earth Observation
### ingestion, pre-processing, transformation, analytic and visualisation
### capabilities . Included is the abilty to run code transparently in
### OpenNebula cloud environments. There are various software
### dependencies, but all are FOSS.
###
### This file may be used under the terms of the GNU General Public
### License version 2.0 as published by the Free Software Foundation
### and appearing in the file LICENSE.GPL included in the packaging of
### this file. Please review the following to ensure GNU General Public
### Licensing requirements will be met:
### http://www.opensource.org/licenses/gpl-license.php
###
### If you are unsure which license is appropriate for your use (for
### instance, you are interested in developing a commercial derivative
### of VisTrails), please contact us at vistrails@sci.utah.edu.
###
### This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
### WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
###
#############################################################################
"""This module is called by higher level inits to ensure that registration
with VisTrails takes place
"""
from core.modules.module_registry import get_module_registry
def initialize(*args, **keywords):
    """Register this package's file-tool modules with the VisTrails
    module registry under the ``tools|file`` namespace.

    Called by VisTrails when the package is enabled; imports are kept
    local so the module can be imported without VisTrails fully loaded.
    """
    from core.modules import basic_modules
    from core.modules.vistrails_module import Module
    from Command import Command
    from DirUtils import ListDirContent
    from StringToFile import StringToFile
    from FileToString import FileToString
    from DataWriter import VectorLayerToFile, DataWriterTypeComboBox
    reg = get_module_registry()
    files_namespace = "tools|file"
    # =========================================================================
    # Abstract Modules - these MUST appear FIRST
    # =========================================================================
    # drop-down lists
    reg.add_module(DataWriterTypeComboBox,
                   namespace=files_namespace,
                   abstract=True)
    # =========================================================================
    # ComboBox definitions
    # =========================================================================
    # =========================================================================
    # Standard Modules - Ports defined here
    # =========================================================================
    # =========================================================================
    # Control Flow Modules -
    # =========================================================================
    # =========================================================================
    # Other Modules - without ports OR with locally defined ports
    # =========================================================================
    reg.add_module(ListDirContent,
                   namespace=files_namespace)
    reg.add_module(StringToFile,
                   namespace=files_namespace)
    reg.add_module(FileToString,
                   namespace=files_namespace)
    reg.add_module(VectorLayerToFile,
                   namespace=files_namespace)
|
UTF-8
|
Python
| false | false | 2,014 |
12,103,217,847,305 |
38fff45b5e64432dd1b46a40bf718dc10eb9fe4b
|
ef33241e9d79595a361d19a021302e32bb21fee0
|
/lesson2/lesson2.py
|
04340d2939e8ad9019dd0a90c4335b0d5553965e
|
[] |
no_license
|
rewgoes/webdev_udacity
|
https://github.com/rewgoes/webdev_udacity
|
94e3814e89173496ed58ca8a38609eac13a6e218
|
28a1ee33d92c5195b719ede49ec59099f1a5edbd
|
refs/heads/master
| 2021-01-01T20:00:11.928268 | 2014-11-30T21:14:36 | 2014-11-30T21:14:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import webapp2
import cgi
import string
import re
def escape_html(s):
    """HTML-escape &, <, > and (with quote=True) double quotes in s.

    NOTE(review): cgi.escape was removed in Python 3.8; html.escape is
    the modern equivalent (it additionally escapes single quotes).
    """
    return cgi.escape(s, quote = True)
form1 = """
<form method="post">
What is your birthday?
<br>
<label>
Month
<input type="text" name="month" value="%(month)s">
</label>
<label>
Day
<input type="text" name="day" value="%(day)s">
</label>
<label>
Year
<input type="text" name="year" value="%(year)s">
</label>
<div style="color: red">%(error)s</div>
<br><br>
<input type="submit">
</form>
"""
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August',
'September', 'October', 'November', 'December']
def valid_day(day):
    """Return the day-of-month as an int when `day` is a digit string in
    the range 1..31; otherwise return None."""
    if not (day and day.isdigit()):
        return None
    number = int(day)
    if 0 < number < 32:
        return number
    return None
def valid_month(month):
    """Return the month name with canonical capitalization when `month`
    names one of the twelve months (case-insensitive); otherwise None."""
    if not month:
        return None
    canonical = month.capitalize()
    return canonical if canonical in months else None
def valid_year(year):
    """Return the year as an int when `year` is a digit string strictly
    between 1880 and 2020; otherwise return None."""
    if not (year and year.isdigit()):
        return None
    number = int(year)
    if 1880 < number < 2020:
        return number
    return None
class MainPage(webapp2.RequestHandler):
    """Birthday form: validates month/day/year and redirects to /thanks."""
    def write_form(self, error="", month="", day="", year=""):
        # Re-render the form, echoing the (HTML-escaped) previous inputs.
        self.response.out.write(form1 %{"error": error,
                                        "month": escape_html(month),
                                        "day": escape_html(day),
                                        "year": escape_html(year)})
    def get(self):
        # Initial page load: empty form.
        self.write_form()
    def post(self):
        user_month = self.request.get('month')
        user_day = self.request.get('day')
        user_year = self.request.get('year')
        month = valid_month(user_month)
        day = valid_day(user_day)
        year = valid_year(user_year)
        if not(month and day and year):
            # Invalid: re-show the form with an error and the raw inputs.
            self.write_form("That doesn't look valid to me, friend.", user_month, user_day, user_year)
        else:
            self.redirect("/thanks")
class ThanksHandler(webapp2.RequestHandler):
    """Landing page shown after a valid birthday submission."""
    def get(self):
        self.response.out.write("Thanks! That's a totally valid day!")
form2 = """
<h1>Enter some text to ROT13:</h1>
<form method="post">
<textarea name="text" style="height: 100px; width: 400px;">%(text)s</textarea>
<br />
<input type="submit" />
</form>
"""
rot13 = string.maketrans("ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz","NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm")
class Rot13Handler(webapp2.RequestHandler):
    """ROT13 form: echoes the submitted text ROT13-encoded.

    NOTE(review): the "rot13" codec used via str.encode is Python 2 only
    (in Python 3 it exists only as a text transform through codecs).
    """
    def write_form(self, text=""):
        self.response.out.write(form2 %{"text": escape_html(text)})
    def get(self):
        self.write_form()
    def post(self):
        user_text = self.request.get('text')
        self.write_form(user_text.encode("rot13"))
formSingUp = """
<html>
<head>
<title>Sign Up</title>
<style type="text/css">
.label {text-align: right}
.error {color: red}
</style>
<style type="text/css"></style>
</head>
<body>
<h2>Signup</h2>
<form method="post">
<table>
<tbody><tr>
<td class="label">
Username
</td>
<td>
<input type="text" name="username" value="%(username)s">
</td>
<td class="error">
%(usernameError)s
</td>
</tr>
<tr>
<td class="label">
Password
</td>
<td>
<input type="password" name="password">
</td>
<td class="error">
%(passwordError)s
</td>
</tr>
<tr>
<td class="label">
Verify Password
</td>
<td>
<input type="password" name="verify">
</td>
<td class="error">
%(vPasswordError)s
</td>
</tr>
<tr>
<td class="label">
Email (optional)
</td>
<td>
<input type="text" name="email" value="%(email)s">
</td>
<td class="error">
%(emailError)s
</td>
</tr>
</tbody></table>
<input type="submit">
</form>
</body>
</html>
"""
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
EMAIL_RE = re.compile(r"^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$")
def valid_username(username):
    """Return a match object when `username` is 3-20 characters drawn
    from [a-zA-Z0-9_-]; otherwise None."""
    return re.match(r"^[a-zA-Z0-9_-]{3,20}$", username)
def valid_email(email):
    """Return a match object when `email` matches the module's (lowercase
    only) address pattern; otherwise None."""
    return re.match(r"^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$", email)
class UserSingupHandler(webapp2.RequestHandler):
    """Signup form ("Singup" sic): validates username, password pair and
    optional email, redirecting to the welcome page on success."""
    def write_form(self, username="", email="", usernameError="", passwordError="", vPasswordError="", emailError=""):
        # Re-render the form; user-supplied values are escaped, error
        # strings are trusted literals.
        self.response.out.write(formSingUp %{"username": escape_html(username),
                                             "email": escape_html(email),
                                             "usernameError": usernameError,
                                             "passwordError": passwordError,
                                             "vPasswordError": vPasswordError,
                                             "emailError": emailError})
    def get(self):
        self.write_form()
    def post(self):
        user_username = self.request.get("username")
        user_password = self.request.get("password")
        user_vPassword = self.request.get("verify")
        user_email = self.request.get("email")
        usernameError = ""
        passwordError = ""
        vPasswordError = ""
        emailError = ""
        fail=False
        # Validate every field independently so all errors show at once.
        if not (user_username and valid_username(user_username)):
            usernameError="That's not a valid username."
            fail=True
        if not user_password:
            passwordError="That wasn't a valid password."
            fail=True
        if not user_password == user_vPassword:
            vPasswordError="Your passwords didn't match."
            fail=True
        if user_email and not valid_email(user_email):
            emailError = "That's not a valid email."
            fail=True
        if not fail:
            self.redirect("/unit2/welcome?username=%s" % user_username)
        else:
            # Passwords are deliberately not echoed back.
            self.write_form(username=user_username,
                            email=user_email,
                            usernameError=usernameError,
                            passwordError=passwordError,
                            vPasswordError=vPasswordError,
                            emailError=emailError)
class SingupThanksHandler(webapp2.RequestHandler):
    """Welcome page shown after a successful signup."""
    def get(self):
        user_username = self.request.get("username")
        # NOTE(review): the username is echoed without HTML-escaping —
        # potential XSS if this page is reachable with arbitrary input.
        self.response.out.write("Welcome, %s!" % user_username)
# WSGI routing table (note the "singup" spelling in the URL paths).
application = webapp2.WSGIApplication([('/', MainPage),
                                       ('/thanks', ThanksHandler),
                                       ("/unit2/rot13",Rot13Handler),
                                       ("/unit2/singup",UserSingupHandler),
                                       ("/unit2/welcome",SingupThanksHandler)],debug=True)
|
UTF-8
|
Python
| false | false | 2,014 |
5,841,155,548,165 |
02a94e5c31966da64bfa6bad212c1abc3bfa5c1e
|
67ae6d42b46971705e01bff4c503c793f17646eb
|
/hosts/tests.py
|
634d7ba507a3ee365f34ed205fb4efebfed00c6c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
non_permissive
|
edazdarevic/shipyard
|
https://github.com/edazdarevic/shipyard
|
8bdfe7f488ef6650b20f8ab0e0b8b73e3e196cbb
|
d911a5557e7fa2974bf756d78491414fa71c6ce3
|
refs/heads/master
| 2021-01-23T00:16:08.359875 | 2013-12-16T13:31:00 | 2013-12-16T13:31:08 | 15,232,891 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from tastypie.test import ResourceTestCase
from django.contrib.auth.models import User
class HostResourceTest(ResourceTestCase):
    """Tastypie API tests for /api/v1/hosts/: authentication is required,
    list and detail endpoints return valid JSON, and the detail payload
    exposes the expected fields.

    NOTE(review): "unauthorzied" below is a typo; the method name is kept
    as-is so test discovery/reporting stays stable.
    """
    fixtures = ['test_hosts.json']
    def setUp(self):
        super(HostResourceTest, self).setUp()
        self.api_list_url = '/api/v1/hosts/'
        self.username = 'testuser'
        self.password = 'testpass'
        # Create a user; tastypie's signal creates the ApiKey used below.
        self.user = User.objects.create_user(self.username,
            'testuser@example.com', self.password)
        self.api_key = self.user.api_key.key
    def get_credentials(self):
        # ApiKey authentication header for self.user.
        return self.create_apikey(self.username, self.api_key)
    def test_get_list_unauthorzied(self):
        """
        Test get without key returns unauthorized
        """
        self.assertHttpUnauthorized(self.api_client.get(self.api_list_url,
            format='json'))
    def test_get_list_json(self):
        """
        Test get application list
        """
        resp = self.api_client.get(self.api_list_url, format='json',
            authentication=self.get_credentials())
        self.assertValidJSONResponse(resp)
    def test_get_detail_json(self):
        """
        Test get application details
        """
        url = '{}1/'.format(self.api_list_url)
        resp = self.api_client.get(url, format='json',
            authentication=self.get_credentials())
        self.assertValidJSONResponse(resp)
        data = self.deserialize(resp)
        keys = data.keys()
        self.assertTrue('name' in keys)
        self.assertTrue('hostname' in keys)
        self.assertTrue('port' in keys)
        self.assertTrue('enabled' in keys)
UTF-8
|
Python
| false | false | 2,013 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.