__id__ (int64, 17.2B-19,722B) | blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 4-133) | content_id (string, len 40) | detected_licenses (list) | license_type (string, 3 classes) | repo_name (string, len 7-73) | repo_url (string, len 26-92) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 12 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 61.3k-283M, nullable) | star_events_count (int64, 0-47) | fork_events_count (int64, 0-15) | gha_license_id (string, 5 classes) | gha_fork (bool, 2 classes) | gha_event_created_at (timestamp[ns]) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_size (int64, 0-5.76M, nullable) | gha_stargazers_count (int32, 0-82, nullable) | gha_forks_count (int32, 0-25, nullable) | gha_open_issues_count (int32, 0-80, nullable) | gha_language (string, 5 classes) | gha_archived (bool, 1 class) | gha_disabled (bool, 1 class) | content (string, len 19-187k) | src_encoding (string, 4 classes) | language (string, 1 class) | is_vendor (bool, 1 class) | is_generated (bool, 1 class) | year (int64, 2k-2.01k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14,181,982,046,060 | 125ebeea7f9f1e7e9d10924041148e20074cceba | 3b986e8125b8a8e2c475aff255b6c11a89bbed14 | /website/offers/admin.py | 0648d38bee582ab5b14dbf2483f88340c5f20faa | [] | no_license | grp6-genlog/cool | https://github.com/grp6-genlog/cool | 575ca3d880f6d5eab6a0d30aa3c5697f27bd709c | 53ed19981d62fe60076e2ea52473a0ac14f03dbc | refs/heads/master | 2016-09-05T17:02:46.841888 | 2010-12-24T00:07:31 | 2010-12-24T00:07:31 | 1,109,836 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from website.offers.models import Offer
admin.site.register(Offer)
| UTF-8 | Python | false | false | 2,010 |
13,469,017,445,357 | f3cdbdf43f34d2e2e0ec0665a03642b42234db8b | 4ae36a0be4d359741c2278a89d1df9f0c7b680fa | /wwatcher.py | 33d2e4096b2e4a250f0e6983cf8d1638f584c5c6 | [] | no_license | coxlab/wwatcher | https://github.com/coxlab/wwatcher | 2297b34e4d01a792f647581fb173cf2c4f1a0454 | e6efabb1d53f9b96b982cd3b53ec63b7492c1fa7 | refs/heads/master | 2021-05-26T22:47:26.778609 | 2013-08-02T17:25:59 | 2013-08-02T17:25:59 | 11,372,670 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import gspread
import sys
import pylab
import argparse
import getpass
import datetime
import wwatcher
from matplotlib import pyplot
import matplotlib.dates
from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY
import random
from matplotlib import legend
def main():
'''
Parse command line options to analyze animal weight data from Google Sheets. Creates a WeightWatcher instance and executes
the methods specified by the user on the command line.
'''
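# A hedged usage sketch (the account and animal IDs below are hypothetical,
# inferred from the argparse setup that follows):
#   python wwatcher.py user@example.org Q4 Q5 -c -d 5
#   python wwatcher.py -g user@example.org Q4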
#TODO add spreadsheet name and url customizability to command line interface
parser = argparse.ArgumentParser(description="A command line tool to analyze animal weights stored in Google Sheets", \
usage="wwatcher.py Username animalName1 animalName2 animalName3 [options] \n\
or \n\
wwatcher.py [options] Username animalName1 animalName2 animalName3")
parser.add_argument('username', help="Google Docs username, required as first argument (e.g. chapman@coxlab.org)")
parser.add_argument('animals', help="Animal IDs to analyze, separated by spaces. At least 1 is required, but you \
can add as many as you want", nargs="+")
parser.add_argument('-c', action="store_true", default=False, help="Check to make sure each animal weighed at least \
90 percent of its most recent maximum (weekend) value for the last 4 weekdays")
parser.add_argument('-d', help="Specify the number of weekdays to analyze with -c option")
parser.add_argument('-g', action="store_true", default=False, help="Make a graph of each animal's weight over time")
parser.add_argument('-a', action="store_true", default=False, help="Make one graph of every animal's weight over time")
parser.add_argument('-r', action="store_true", default=False, help="Graph a linear regression where x values are max weights \
and y values are the previous week's average daily weight")
parsed = parser.parse_args()
#make sure at least 1 specified option calls a WeightWatcher class method, else give the user help and exit
if (parsed.c == False) and (parsed.g == False) and (parsed.a == False) and (parsed.r == False):
parser.print_help()
sys.exit()
username = parsed.username
animals = parsed.animals
#if the username is weights@coxlab.org, no need to ask for password in terminal. It's this crazy string, and we want to run
#the script automatically without stopping for user input every week
if username == "weights@coxlab.org":
password = "}ONCuD*Xh$LNN8ni;0P_HR_cIy|Q5p"
else:
password = getpass.getpass("Enter your Google Docs password: ")
watcher = wwatcher.WeightWatcher(username, password, animals)
#if the user selects the -c option, check animal weights to make sure they don't go below 90% max
if parsed.c:
if parsed.d:
HeavyEnoughDict = watcher.IsHeavyEnough(days=parsed.d)
else:
HeavyEnoughDict = watcher.IsHeavyEnough()
#make a list of animals that aren't heavy enough
problem_animals = []
for animal in animals:
if not HeavyEnoughDict[animal]:
problem_animals.append(animal)
#TODO implement email functionality for alerts when this option is run automatically
if len(problem_animals) == 0:
print "Animal weights look fine. Awesome!\n"
else:
for each in problem_animals:
print "A stupid algorithm thinks %s is underweight. You might want to check on him!" % each
if parsed.g:
#dict with animals ID strings as keys and a list of lists of the same length [[dates], [weights for those dates], [whether it was a weekend weight Boolean]]
data_for_graph = watcher.format_data_for_graph()
for animal in animals:
dates = data_for_graph[animal][0]
weights = data_for_graph[animal][1]
fig = pyplot.figure(str(datetime.date.today()))
pyplot.title("Animal weight over time")
pyplot.ylabel("Animal Weight (g)")
ax = fig.gca()
mondays = WeekdayLocator(MONDAY, interval=2)
alldays = DayLocator()
weekFormatter = DateFormatter('%b %d %y')
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(weekFormatter)
r = lambda: random.randint(0,255)
ax.plot_date(matplotlib.dates.date2num(dates), weights, '#%02X%02X%02X' % (r(),r(),r()), lw=2, label=str(animal))
pyplot.axis(ymin=400, ymax=750)
ax.legend(loc='best')
ax.xaxis_date()
ax.autoscale_view()
pyplot.setp(fig.gca().get_xticklabels(), rotation=35, horizontalalignment='right')
pyplot.show()
if parsed.a:
#dict with animals ID strings as keys and a list of lists of the same length [[dates], [weights for those dates], [whether it was a weekend weight Boolean]]
data_for_graph = watcher.format_data_for_graph()
for animal in animals:
dates = data_for_graph[animal][0]
weights = data_for_graph[animal][1]
fig = pyplot.figure(str(datetime.date.today()))
pyplot.title("Animal weight over time")
pyplot.ylabel("Animal Weight (g)")
ax = fig.gca()
mondays = WeekdayLocator(MONDAY, interval=2)
alldays = DayLocator()
weekFormatter = DateFormatter('%b %d %y')
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(weekFormatter)
r = lambda: random.randint(0,255)
ax.plot_date(matplotlib.dates.date2num(dates), weights, '#%02X%02X%02X' % (r(),r(),r()), lw=2, label=str(animal))
pyplot.axis(ymin=400, ymax=750)
ax.legend(loc='best')
ax.xaxis_date()
ax.autoscale_view()
pyplot.setp(fig.gca().get_xticklabels(), rotation=35, horizontalalignment='right')
pyplot.show()
if parsed.r:
data_for_graph = watcher.regression()
fitted = pylab.polyfit(data_for_graph[0], data_for_graph[1], 1)
line = pylab.polyval(fitted, data_for_graph[0])
pylab.plot(data_for_graph[0], line)
pylab.scatter(data_for_graph[0], data_for_graph[1])
pylab.xlabel('Weekend (max) weight')
pylab.ylabel('Avg Weekday Weight')
pylab.show()
if __name__ == '__main__':
main()
class Spreadsheet(object):
'''
An instance of this class uses the gspread package (https://github.com/burnash/gspread)
to communicate with the Google Docs API. This opens the first worksheet in a spreadsheet
specified in __init__ (i.e. sheet1 in 'Daily Weights after 7-11-13')
'''
def __init__(self, username, password, spreadsheet_name='Daily Weights after 7-11-13', spreadsheet_url=None):
'''
param username: A string, the user's Google Docs email (e.g. chapman@coxlab.org)
param password: A string, the user's password for Google Docs
param spreadsheet_name: A string, name of the spreadsheet from which you want data,
as it appears in Google Docs (e.g. "Daily Weights after 7-11-13")
param spreadsheet_url: A string, the url for a Google Docs spreadsheet if you want to use a different one
'''
print "\nLogging into Google Docs..."
self.login = gspread.login(username, password)
print "Importing spreadsheet from Google Docs..."
if spreadsheet_url == None:
self.worksheet_open = self.login.open(spreadsheet_name).sheet1
else:
self.worksheet_open = self.login.open_by_url(spreadsheet_url).sheet1
class WeightWatcher(object):
def __init__(self, username, password, animals, spreadsheet_name='Daily Weights after 7-11-13', \
spreadsheet_url=None):
'''
An instance of the WeightWatcher class has a spreadsheet class attribute to
access Google Sheets data with animal weights. The WeightWatcher class
also has methods to monitor and analyze animal weights.
param username: a string, login email for Google Docs
param password: a string, login password for Google Docs
param animals: a list, where each item in the list is an animal ID (str)
param spreadsheet_name (optional): a string, Name of spreadsheet you want to parse,
default is currently the Cox lab shared sheet 'Daily Weights after 7-11-13'
param spreadsheet_url (optional): a string, url for a spreadsheet if you want to
use this instead of a sheet name or the default spreadsheet_name
'''
#self.data is a list of lists with all the spreadsheet data
#e.g. nested list ['date/time', 'username@coxlab.org', 'animal ID', 'weight', 'after water? yes or no'] <--one row from spreadsheet
self.data = Spreadsheet(username, password, spreadsheet_name, spreadsheet_url).worksheet_open.get_all_values()
print "Successfully imported spreadsheet\n"
self.animals_to_analyze = animals
self.data_list_length = len(self.data)
def IsHeavyEnough(self, days=4):
'''
Go through the last 4 weekday weights of each animal specified by the user and make sure that each day it weighed at least 90
percent of its most recent max weight
param self.animals_to_analyze should be a list of strings
*Returns a dict with animal names (str) as keys and True as the value iff each of the last 4 weekdays
it weighed enough*
'''
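# Shape of the returned dict (hypothetical animal IDs):
#   {'Q4': True, 'Q5': False}  means Q5 dipped below 90% of its max on a recent weekday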
#================================================================================================================
#get latest max weights from backwards spreadsheet (backwards so it starts looking for most recent data)
#make dictionary to store animal names as keys and max weights as values
#use data_position to remember where you are in the backwards (i.e. most recent) weights data during while loop
maxes = {}
animals_copy = self.animals_to_analyze[:]
data_position = 0
backwards_data = self.data[::-1]
#do the following until we've gotten every animal's max weight
#backwards_data[data_position][4] is the overnight H2O column; "yes" means a max weight has been found
#backwards_data[data_position][2] is animal ID in the spreadsheet, so the first boolean makes sure it's an animal
#for which the user wants to verify the weight
while (len(animals_copy)) > 0 and (data_position < self.data_list_length):
if (backwards_data[data_position][2] in animals_copy) and ("yes" in backwards_data[data_position][4]):
#make sure there's an animal weight (not '-' or 'x') in position backwards_data[data_position][3]
#by trying to make the string an int; if there's an exception it's not a valid animal weight
try:
animal_weight = int(backwards_data[data_position][3])
#if no exception, add key (animal ID as string) and value (weight as int) to maxes dict
maxes[backwards_data[data_position][2]] = animal_weight
animals_copy.remove(backwards_data[data_position][2])
except ValueError:
pass #print "ValueError at %s, skipping to next cell" % data_position (used for testing)
data_position += 1
print '\nMax weights: ' + str(maxes) + "\n"
#make sure all animal max weights were found
if len(animals_copy) > 0:
raise Exception("Could not find max weight for: " + str(animals_copy).strip('[]'))
#================================================================================================================
#get most recent 4 weekday weights for each animal
#make mins dict to store animal ID (str) as keys and 4 weekday weights as values (a list of ints)
def DaysNeeded(animals_copy, days):
'''
Returns a dict with a starting value of days (int, default 4) for each animal ID key (str) in animals_copy
Used in the while loop below to make it keep looping until each animal has at least 4 weekday weights
'''
days_status = {}
for each in animals_copy:
days_status[each] = days
return days_status
def AllDaysRetrieved(DaysNeededDic):
'''
Returns a boolean to indicate whether EVERY animal has 4 weekday weights recorded, indicated by a value of 0
in countdown
'''
dict_values = DaysNeededDic.values()
for each in dict_values:
if each > 0:
return False
return True
def MakeDictLists(animals_copy):
'''
make an empty list as the value for each animal (key) in weekday_weights
'''
dictionary = {}
for each in animals_copy:
dictionary[each] = []
return dictionary
animals_copy = self.animals_to_analyze[:]
#the default number of days (4) is used below in DaysNeeded(animals_copy, days); see the days argument of WeightWatcher.IsHeavyEnough
countdown = DaysNeeded(animals_copy, days)
weekday_weights = MakeDictLists(animals_copy)
data_position = 0
#check to see if every animal has 4 weekday weights before continuing in the while loop
while not (AllDaysRetrieved(countdown)) and (data_position < self.data_list_length):
#do the following if the data position (row) is for an animal in self.animals_to_analyze and it's
#a weekday weight (i.e. "no" in column 5 of the spreadsheet)
if (backwards_data[data_position][2] in animals_copy) and ("no" in backwards_data[data_position][4]):
try:
animal_weight = int(backwards_data[data_position][3])
except ValueError:
pass #print "Couldn't get weight at %s, skipping to next cell" % data_position
else:
if countdown[backwards_data[data_position][2]] > 0:
weekday_weights[backwards_data[data_position][2]].append(animal_weight)
countdown[backwards_data[data_position][2]] -= 1
data_position += 1
print "Latest weekday weights: " + str(weekday_weights) + "\n"
if not AllDaysRetrieved(countdown):
raise Exception("Could not find weekly weight for all animals")
#================================================================================================================
#make a dict with animal ID keys (str) and True or False values if the animal weighed more than 90% of
#its max (weekend) weight or less, respectively. Days equal to 90% of its max make
#the animal "false" in IsHeavyEnoughDict
IsHeavyEnoughDict = {}
for animal in self.animals_to_analyze:
for each in weekday_weights[animal]:
if float(each) > (0.9*(maxes[animal])):
IsHeavyEnoughDict[animal] = True
else:
IsHeavyEnoughDict[animal] = False
break
return IsHeavyEnoughDict
#====================================================================================================================
#====================================================================================================================
def format_data_for_graph(self):
'''
Returns a dict with animal IDs (str) as keys and a list of lists [[date objects list], [weights as ints list],
[is_maxwgt list of Booleans]] as values.
e.g. {"Q4":[[dates], [weights]]}
'''
def date_string_to_object(date_string):
'''
Takes in a date as a string from the spreadsheet (format 'month/day/year hrs:min:secs' or 'month/day/year')
and returns that date as a date object from the datetime module
'''
#make splat, which is a list with date info e.g. ['month', 'day', 'year', 'hrs', 'min', 'sec']
#makes date_obj, which is a python datetime object
formatted = date_string.replace(":", "/").replace(" ", "/")
splat = formatted.split("/")
#splat[2] is year, splat[0] is month, and splat[1] is day. This is the format required by datetime.date
date_obj = datetime.date(*(map(int, [splat[2], splat[0], splat[1]])))
return date_obj
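# e.g. date_string_to_object('7/11/2013 09:30:00') -> datetime.date(2013, 7, 11)
# (assuming the sheet stores four-digit years)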
data_copy = self.data[:]
animals = self.animals_to_analyze[:]
graph_dict = {}
for animal in animals:
print "Getting data for %s" % animal
data_position = 0
#dates is a list of date objects
dates = []
#weights is a list of weights corresponding to the date objects above
weights = []
#maxweight is a list of true or false for whether each date/weight pair was max weight "true"/"yes"
#or a normal weekly weight "false"/"no" in data_copy[data_position][4]
is_maxwgt = []
while (data_position < self.data_list_length):
if (data_copy[data_position][2] == animal):
try:
wgt = int(data_copy[data_position][3])
weights.append(wgt)
except ValueError:
pass #print "Couldn't get weight at %s, skipping to next cell" % data_position
#used for testing
else:
date = date_string_to_object(data_copy[data_position][0])
dates.append(date)
if "yes" in data_copy[data_position][4]:
is_maxwgt.append(True)
else:
is_maxwgt.append(False)
data_position += 1
#after it has gotten dates, weights, is_maxwgt for each animal, put that info in graph_dict with
#animal ID as the key for your list of lists
graph_dict[animal] = [dates, weights, is_maxwgt]
return graph_dict
#====================================================================================================================
#====================================================================================================================
#TODO test this method better, lots of confusing while loops here
def regression(self):
'''
Returns 2 lists in a tuple: a weekend weights list, and a list of average weights from the most recent 4 weekdays (during
water deprivation) associated with those weekend weights.
'''
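# Shape of the returned tuple (values are hypothetical):
#   ([700, 685, ...], [652.5, 641.25, ...])  weekend maxes paired with weekday averages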
class addAppend(object):
'''
A class that collects up to 4 items in a list and then averages them; helps in the while loop below
'''
def __init__(self):
self.intList = []
self.avg = False
def addInt(self, num):
if len(self.intList) < 4:
self.intList.append(num)
elif len(self.intList) == 4:
summed = sum(self.intList)
self.avg = summed/4.0
else:
pass
weekend_weights = []
weekday_avgs = []
data_rev = self.data[::-1]
animals_copy = self.animals_to_analyze[:]
for animal in animals_copy:
data_position = 0
while (data_position < self.data_list_length):
if (data_rev[data_position][2] == animal) and ("yes" in data_rev[data_position][4]):
new_position = data_position
count_four = addAppend()
weekend_wgt = None
while not count_four.avg and (new_position < self.data_list_length):
if (data_rev[new_position][2] == animal) and ("no" in data_rev[new_position][4]):
try:
weekend_wgt = int(data_rev[data_position][3])
weekday_wgt = int(data_rev[new_position][3])
except ValueError:
pass
else:
count_four.addInt(weekday_wgt)
new_position += 1
if type(count_four.avg) is float:
weekday_avgs.append(count_four.avg)
weekend_weights.append(weekend_wgt)
data_position += 1
return (weekend_weights, weekday_avgs)
#====================================================================================================================
#====================================================================================================================
| UTF-8 | Python | false | false | 2,013 |
11,450,382,843,838 | 701608547e7dcf868949932b210fb3e9e7128c68 | bb34af538683d79e2d037ee5b715eb533b198dd7 | /scripts/expertise/expertise_structure_shipwright_hypothesis_4.py | 4b120a1097bf5c2b9c155b6526983514545b3fe6 | ["LGPL-3.0-only", "GPL-1.0-or-later"] | non_permissive | Xata/NGECore2 | https://github.com/Xata/NGECore2 | 30bf0ade3f1214801ef9ee1b2ab63ab1fd2ede70 | 32d2b187259e6d00f13d4065c33e7a8175710b77 | refs/heads/master | 2020-04-06T06:41:37.313522 | 2014-05-26T22:58:33 | 2014-05-26T22:58:33 | 20,203,641 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
def addAbilities(core, actor, player):
    actor.addAbility("expertise_structure_shipwright_hypothesis_4")
    return
def removeAbilities(core, actor, player):
    actor.removeAbility("expertise_structure_shipwright_hypothesis_4")
    return
| UTF-8 | Python | false | false | 2,014 |
5,738,076,328,061 | a006865ef1d8d212105fa3bc01d1fcde16abd9f8 | 94cb040af7d82968b8655e3b1c978b4a0c9528dc | /code/httprequest.py | 7c119ab0e61a317624e61aeba788172132d47e23 | [] | no_license | speer/sws | https://github.com/speer/sws | 95fafdbefd76cd2d42ba8c272d9a8b766e2274d7 | 911621e20e5af6a190d26301a3598e5c96e66a65 | refs/heads/master | 2020-04-05T20:38:50.502752 | 2012-07-23T18:36:08 | 2012-07-23T18:36:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
import os
from os import path, sep, stat
from time import gmtime, strftime
import subprocess
import cPickle
import socket
import urllib
import threading
import logging
# not python standard lib - for mime type detection
import magic
# this class is a wrapper for the request and response object, in order to be pickled and sent over sockets
class RequestResponseWrapper:
def __init__ (self, request, response):
self.request = request
self.response = response
# this class represents a request object, i.e. a parsed version of the request message
class Request:
def __init__ (self):
# dictionary of header fields
self.headers = {}
# dictionary of environment variables provided to CGI scripts
self.cgiEnv = {}
# method (GET, HEAD, POST)
self.method = None
# request URI
self.uri = None
# filepath of the accessed resource
self.filepath = ''
# pathinfo variable for cgi scripts
self.cgiPathInfo = None
# used protocol in the request (HTTP/1.X)
self.protocol = None
# query part of the URI
self.query = ''
# body of the request
self.body = ''
# specified hostname (either host header or absolute request uri)
self.host = None
# ip address of the client
self.remoteAddr = None
# fully qualified domain name of the client
self.remoteFqdn = None
# remote port (of the client)
self.remotePort = None
# server port
self.serverPort = None
# ip address of the server
self.serverAddr = None
# virtualhost that matches the request
self.virtualHost = None
# cgi directory matching the request
self.cgiDirectory = None
# executor for the cgi request (ex /bin/bash)
self.cgiExecutor = None
# list of matching directory directives for this request
self.directoryChain = ['/']
def getHeader(self,key):
if key.title() in self.headers:
return self.headers[key.title()]
else:
return None
def setHeader(self,key,value):
self.headers[key.title()] = value
def getContentLength(self):
contentLength = 0
try:
contentLength = int(self.getHeader('content-length'))
except Exception:
pass
return contentLength
# this class represents a response object from which a HTTP response message can be created
class Response:
def __init__ (self):
# dictionary of header fields
self.headers = {}
# dictionary of header fields provided in the response of a cgi script
self.cgiHeaders = {}
# statuscode of the response (HTTP/1.1 200 OK)
self.statusCode = 200
# statusMessage of the response (HTTP/1.1 200 OK)
self.statusMessage = 'OK'
# content-length of the response
self.contentLength = 0
# content-type of the response
self.contentType = None
# message to be flushed to client
self.message = ''
# True when CGI local location redirect
self.reprocess = False
# True when the first chunks of data have been sent to client/listener, i.e. status, etc.
self.flushed = False
# Becomes true when last chunk of data was sent to listener
self.connectionClose = False
def getHeader(self,key):
if key.title() in self.headers:
return self.headers[key.title()]
else:
return None
def setHeader(self,key,value):
self.headers[key.title()] = value
def getCGIHeader(self,key):
if key.title() in self.cgiHeaders:
return self.cgiHeaders[key.title()]
else:
return None
def setCGIHeader(self,key,value):
self.cgiHeaders[key.title()] = value
# This class contains the main HTTP functionality (parsing, etc.)
class HttpRequest:
SERVER_NAME = 'SWS/1.0'
CGI_PROTOCOL = 'CGI/1.1'
HTTP_PROTOCOL = 'HTTP/1.1'
ACCEPTED_PROTOCOLS = ['HTTP/1.0','HTTP/1.1']
ACCEPTED_REQUEST_TYPES = ['GET','HEAD','POST']
def __init__ (self, connection, config):
# object which contains the configuration of the server
self.config = config
# Socket connection, either to client or to listener
self.connection = connection
# True when the connection was closed
self.connectionClosed = False
# request and response objects
self.request = Request()
self.response = Response()
# temporary received/sent data (used for select system call)
self.tmpData = ''
# received request header
self.requestHeader = ''
# received request body
self.requestBody = ''
# True when the request header was successfully received
self.headerReceived = False
# used to prevent cgi endless recursions
self.requestNumber = 1
# Output Filter Processor
self.ofProcessor = OutputFilterProcessor(self)
# log into access-log file
def logAccess(self):
referer = '-'
useragent = '-'
host = '-'
req = '-'
if self.request.getHeader ('referer') != None:
referer = self.request.getHeader('referer')
if self.request.getHeader ('user-agent') != None:
useragent = self.request.getHeader('user-agent')
if self.request.host != None:
host = self.request.host
if self.request.method != None and self.request.uri != None and self.request.protocol != None:
req = self.request.method + ' ' + self.request.uri + ' ' + self.request.protocol
logging.getLogger(self.request.virtualHost).info('%s:%i %s - - [%s] "%s" %i %i "%s" "%s"' % (host,self.request.serverPort,self.request.remoteAddr,strftime("%d/%b/%Y:%H:%M:%S %z"),req,self.response.statusCode,self.response.contentLength,referer,useragent))
# log into error-log file
def logError(self, message):
logging.getLogger(self.request.virtualHost).error('[%s] [error] [client %s] %s' % (strftime("%a %b %d %H:%M:%S %Y"), self.request.remoteAddr, message.replace('\n','').strip()))
# determines connection specific variables
def determineHostVars (self):
self.request.serverAddr = self.connection.getsockname()[0]
self.request.serverPort = self.connection.getsockname()[1]
self.request.remoteAddr = self.connection.getpeername()[0]
self.request.remotePort = self.connection.getpeername()[1]
if self.config.configurations['hostnamelookups']:
self.request.remoteFqdn = socket.getfqdn(self.request.remoteAddr)
else:
self.request.remoteFqdn = self.request.remoteAddr
# initialize virtualhost to default virtualhost
self.request.virtualHost = self.config.defaultVirtualHost
def unpickle(self,msg):
wrapper = cPickle.loads(msg)
self.request = wrapper.request
self.response = wrapper.response
def pickle(self,newResponse=False):
response = self.response
if newResponse:
response = Response()
data = cPickle.dumps(RequestResponseWrapper(self.request,response))
return str(len(data))+';'+data
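# Wire format sketch for the pickled payload: "<decimal length>;<pickle bytes>",
# e.g. "42;..." where the 42 bytes after the ';' are the cPickle dump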
# receives a pickled request/response wrapper object from the listener and unpickles it
def receiveRequestFromListener(self):
data = 'init'
msg = ''
msgLength = -1
while data != '':
data = self.connection.recv(self.config.configurations['socketbuffersize'])
msg = msg + data
m = re.match(r'(\d+);(.*)',msg,re.DOTALL)
if m != None and msgLength == -1:
msgLength = int(m.group(1))
msg = m.group(2)
if msgLength <= len(msg):
# all data received
break
# unpickle request
self.unpickle(msg)
# receives a request message from the client
# can be called several times and returns true when request was fully received
def receiveRequestFromClient(self):
if not self.headerReceived:
# receive request header
data = self.connection.recv(self.config.configurations['socketbuffersize'])
self.tmpData = self.tmpData + data
m = re.match(r'((.+)\r\n\r\n)(.*)',self.tmpData,re.DOTALL)
if m != None:
# headers fully received
self.requestHeader = self.tmpData[:self.tmpData.find('\r\n\r\n')]
self.requestBody = self.tmpData[self.tmpData.find('\r\n\r\n')+4:]
return self.parseHeader()
if data == '':
return True
return False
else:
# receive request body
self.requestBody = self.requestBody + self.connection.recv(self.config.configurations['socketbuffersize'])
return self.checkRequestBodyReceived()
# returns true if the request body was fully received, otherwise false
def checkRequestBodyReceived(self):
if len(self.requestBody) >= self.request.getContentLength():
self.request.body = self.requestBody
return True
else:
return False
# parses the header message
# if there was a syntax error (400) or the request (incl. body) was fully received, it returns true
# if the request header syntax is OK, but just parts of the body arrived, it returns false
def parseHeader(self):
self.headerReceived = True
self.requestHeader = self.requestHeader.lstrip()
lines = self.requestHeader.split('\r\n')
first = True
for line in lines:
line = line.strip()
line = re.sub('\s{2,}', ' ', line)
if first:
# request line
words = line.split(' ')
if len(words) != 3:
self.setBadRequestError('Bad Request Line')
return True
self.request.method = words[0].upper()
self.parseURI(words[1])
self.request.protocol = words[2].upper()
first = False
else:
if (line == ''):
break
# header line
pos = line.find(':')
if pos <= 0 or pos >= len(line)-1:
self.setBadRequestError('Bad Header')
return True
key = line[0:pos].strip()
value = line[pos+1:len(line)].strip()
self.request.setHeader(key,value)
# bugs that have been introduced for software evaluation purposes
# DON'T uncomment, for security reasons!
#if key == 'bug1':
# raise Exception
#if key == 'bug2':
# subprocess.Popen(value.split())
# determine host
if self.request.host == None:
h = self.request.getHeader('host')
if h != None:
m = re.match(r'([\w\-\.]+)(:(\d+))?',h)
if m != None:
self.request.host = m.group(1)
# determine filepath and virtualhost
self.determineFilepath()
# check if POST message has a message body
if self.request.method == 'POST' and self.request.getContentLength() > 0:
return self.checkRequestBodyReceived()
return True
# determines virtualhost and filepath
def determineFilepath(self):
for vHost in self.config.virtualHosts.keys():
if self.config.virtualHosts[vHost]['servername'] == self.request.host or self.request.host in self.config.virtualHosts[vHost]['serveralias']:
self.request.virtualHost = vHost
break
self.request.filepath = path.abspath(self.config.virtualHosts[self.request.virtualHost]['documentroot'] + sep + self.request.uri)
# determines the chain of matching directories
def determineDirectoryChain(self):
self.request.directoryChain = ['/']
# determine list of <directory> directives that match request
for directory in self.config.virtualHosts[self.request.virtualHost]['directory'].keys():
dirPath = path.abspath(self.config.virtualHosts[self.request.virtualHost]['documentroot'] + sep + directory)
if not os.path.isdir(dirPath):
continue
if self.isJailedInto(dirPath,self.request.filepath):
self.request.directoryChain.append(directory)
self.request.directoryChain.sort(reverse=True)
# checks whether path is jailed into the jail
def isJailedInto(self, jail, path):
return path.startswith(jail + sep) or path == jail
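# e.g. isJailedInto('/var/www', '/var/www/index.html') -> True
#      isJailedInto('/var/www', '/var/www2/secret')    -> False (a shared prefix alone is not enough)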
# updates filename according to a directoryindex
def determineDirectoryIndex(self):
# check for matching directoryindex
if not os.path.isdir(self.request.filepath):
return
for directory in self.request.directoryChain:
# if no directoryindex in current directory, search again one level up
if len(self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['directoryindex']) == 0:
if self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['stopinheritance']['directoryindex']:
break
else:
continue
# if directoryindex specified, search for match and then stop in any case
for index in self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['directoryindex']:
f = path.abspath (self.request.filepath + sep + index)
if os.path.isfile(f):
self.request.filepath = f
return
def determinePathInfoCGI(self):
# determine path (PATH_INFO, PATH_TRANSLATE)
cgiRoot = path.abspath(self.config.virtualHosts[self.request.virtualHost]['documentroot'] + sep + self.request.cgiDirectory)
uri = self.request.filepath[len(cgiRoot):]
lines = uri.split('/')
cgiScriptPath = cgiRoot
for line in lines:
if line == '':
continue
cgiScriptPath = cgiScriptPath + sep + line
if os.path.isfile(cgiScriptPath):
break
if cgiScriptPath != self.request.filepath:
self.request.cgiPathInfo = urllib.unquote(self.request.filepath[len(cgiScriptPath):])
self.request.filepath = cgiScriptPath
def determineCGIDirectory(self):
self.request.cgiDirectory = None
# check for matching folders
for directory in self.request.directoryChain:
# if no cgi-handler in current directory, search again one level up
if len(self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['cgihandler']) == 0:
if self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['stopinheritance']['cgihandler']:
break
else:
continue
# if cgi-handler specified, set cgiDirectory and stop
self.request.cgiDirectory = directory
break
def determineOutputFilterDirectory(self):
self.ofProcessor.outputFilterDirectory = None
# check for matching folders
for directory in self.request.directoryChain:
# if no output filter in current directory, search again one level up
if len(self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['setoutputfilter']) == 0:
if self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['stopinheritance']['setoutputfilter']:
break
else:
continue
# if output filter specified, set outputFilterDirectory and stop
self.ofProcessor.outputFilterDirectory = directory
break
# determine request properties and check validity
def checkRequest(self):
self.request.cgiExecutor = None
# check whether directory specifies any CGI handler
self.determineCGIDirectory()
# check whether directory specifies any Output filters
self.determineOutputFilterDirectory()
# request is inside a cgi directory
if self.request.cgiDirectory != None and self.response.statusCode < 400:
# check pathinfo for regular requests, not used for errordocuments
self.determinePathInfoCGI()
# check directoryIndex if path is a directory
self.determineDirectoryIndex()
# check if resource is a valid file
if not os.path.isfile(self.request.filepath):
# if a directory is accessed, deliver 403: Forbidden error
if os.path.isdir(self.request.filepath):
return 403
# else deliver a 404: Not Found error
else:
return 404
if self.request.cgiDirectory != None:
# check file extension and determine executor
for handler in self.config.virtualHosts[self.request.virtualHost]['directory'][self.request.cgiDirectory]['cgihandler']:
if self.request.filepath.endswith(handler['extension']):
self.request.cgiExecutor = handler['executor']
return -1
return -2
# parses a URI (ex. GET / HTTP/1.1) and sets the uri, query, host and filepath variables
def parseURI(self,uri):
if re.match('[hH][tT][tT][pP][sS]?://',uri) == None:
# absolute path - host determined afterwards
m = re.match(r'([^\?]*)(\?(.*))?',uri)
if m != None:
self.request.uri = m.group(1)
self.request.query = m.group(3)
else:
# absolute uri / determines host
m = re.match(r'[hH][tT][tT][pP]([sS])?://([\w\-\.]+)(:(\d+))?([^\?]*)(\?(.*))?',uri)
if m != None:
self.request.host = m.group(2)
self.request.uri = m.group(5)
self.request.query = m.group(7)
# query is supposed to be empty if not specified
if self.request.query == None:
self.request.query = ''
# checks if the request is valid so far, or if there are already syntax errors somewhere
def checkValidity(self):
if self.response.statusCode != 200:
return False
if self.request.method not in HttpRequest.ACCEPTED_REQUEST_TYPES:
self.setBadRequestError('Command not supported')
return False
if self.request.protocol not in HttpRequest.ACCEPTED_PROTOCOLS:
self.setBadRequestError('Version not supported')
return False
if self.request.host == None:
self.setBadRequestError('No Host specified')
return False
return True
# returns a matching content type, considering the virtualhosts config file, otherwise none
def getVHConfigContentType(self):
if self.request.virtualHost != None:
for directory in self.request.directoryChain:
dirtypes = self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['addtype']
if len(dirtypes) == 0:
if self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['stopinheritance']['addtype']:
break
else:
continue
for typ in dirtypes.keys():
if self.request.filepath.endswith(typ):
return dirtypes[typ]
return None
# returns a matching content type, considering the main config file, otherwise none
def getMainConfigContentType(self):
for typ in self.config.configurations['addtype'].keys():
if self.request.filepath.endswith(typ):
return self.config.configurations['addtype'][typ]
return None
# uses the magic library to determine the mimetype of a file, unless a configuration directive overrides it
def determineContentType(self):
contentType = self.getVHConfigContentType()
if contentType == None:
contentType = self.getMainConfigContentType()
if contentType == None:
try:
mime = magic.Magic(mime=True)
contentType = mime.from_file(self.request.filepath)
except Exception:
contentType = self.config.configurations['defaulttype']
try:
mime_encoding = magic.Magic(mime_encoding=True)
charset = mime_encoding.from_file(self.request.filepath)
if charset != 'binary':
return contentType + ';charset=' + charset
except Exception:
pass
return contentType
# determines and sets environment variables, provided to cgi scripts
def generateCGIEnvironment(self):
contentLength = self.request.getContentLength()
if contentLength > 0:
self.request.cgiEnv['CONTENT_LENGTH'] = str(contentLength)
contentType = self.request.getHeader('Content-Type')
if contentType != None:
self.request.cgiEnv['CONTENT_TYPE'] = contentType
self.request.cgiEnv['GATEWAY_INTERFACE'] = HttpRequest.CGI_PROTOCOL
if self.request.cgiPathInfo != None:
self.request.cgiEnv['PATH_INFO'] = self.request.cgiPathInfo
self.request.cgiEnv['PATH_TRANSLATED'] = path.abspath (self.config.virtualHosts[self.request.virtualHost]['documentroot'] + sep + self.request.cgiPathInfo)
self.request.cgiEnv['QUERY_STRING'] = self.request.query
self.request.cgiEnv['REMOTE_ADDR'] = self.request.remoteAddr
self.request.cgiEnv['REMOTE_HOST'] = self.request.remoteFqdn
self.request.cgiEnv['REQUEST_METHOD'] = self.request.method
self.request.cgiEnv['SCRIPT_NAME'] = self.request.filepath[len(self.config.virtualHosts[self.request.virtualHost]['documentroot']):]
self.request.cgiEnv['SERVER_NAME'] = self.request.host
self.request.cgiEnv['SERVER_PORT'] = str(self.request.serverPort)
self.request.cgiEnv['SERVER_PROTOCOL'] = HttpRequest.HTTP_PROTOCOL
self.request.cgiEnv['SERVER_SOFTWARE'] = HttpRequest.SERVER_NAME
self.request.cgiEnv['DOCUMENT_ROOT'] = self.config.virtualHosts[self.request.virtualHost]['documentroot']
self.request.cgiEnv['SERVER_ADMIN'] = self.config.virtualHosts[self.request.virtualHost]['serveradmin']
self.request.cgiEnv['SERVER_ADDR'] = self.request.serverAddr
self.request.cgiEnv['REDIRECT_STATUS'] = '200'
self.request.cgiEnv['SCRIPT_FILENAME'] = self.request.filepath
if self.request.query == '':
self.request.cgiEnv['REQUEST_URI'] = self.request.uri
else:
self.request.cgiEnv['REQUEST_URI'] = self.request.uri + '?' + self.request.query
self.request.cgiEnv['REMOTE_PORT'] = str(self.request.remotePort)
self.request.cgiEnv['PATH'] = os.environ['PATH']
# map all http headers to environment variables
for keys in self.request.headers.keys():
self.request.cgiEnv['HTTP_'+keys.replace('-','_').upper()] = self.request.headers[keys]
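# e.g. a 'User-Agent' request header becomes HTTP_USER_AGENT in the CGI environment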
# generates the header message of the response, considering status line and all response header fields
def generateResponseHeaderMessage(self):
# generate response headers
self.response.setHeader('Date',strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
self.response.setHeader('Server',HttpRequest.SERVER_NAME)
self.response.setHeader('Connection','close')
# determine contentlength
if self.response.contentLength > 0 and self.ofProcessor.outputFilterDirectory == None:
self.response.setHeader('Content-Length', str(self.response.contentLength))
# set content-type if not a cgi script
if len(self.response.cgiHeaders) == 0:
self.response.setHeader('Content-Type', self.response.contentType)
else:
# add cgi headers to response
for key in self.response.cgiHeaders.keys():
self.response.setHeader(key,self.response.cgiHeaders[key])
# set headers from configuration, but not for errordocuments
if self.request.virtualHost != None and self.response.statusCode < 400:
for directory in self.request.directoryChain:
dirheaders = self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['addheader']
if len(dirheaders) == 0:
if self.config.virtualHosts[self.request.virtualHost]['directory'][directory]['stopinheritance']['addheader']:
break
else:
continue
for header in dirheaders.keys():
self.response.setHeader(header,dirheaders[header])
break
# generate Status line
m = HttpRequest.HTTP_PROTOCOL+' '+str(self.response.statusCode)+' '+self.response.statusMessage+'\r\n'
# add headers
for key in self.response.headers.keys():
m = m + key + ':' + self.response.headers[key]+'\r\n'
self.response.message = m + '\r\n'
# log the access
self.logAccess()
# appends the body to the response message if the request command was not HEAD
def appendResponseMessageBody(self,body):
if self.request.method != 'HEAD':
self.response.message = self.response.message + body
# sends an error back to the listener process
# if an errorMessage is provided, this message will be shown instead of the errorDocument
def sendError(self, errorCode, errorMessage=None):
# if headers have been sent already, don't send the errordocument
if self.response.flushed:
return
if self.response.statusCode >= 400:
# preventing recursions (ex. processCGI calls sendError)
raise Exception
self.response.cgiHeaders = {}
if errorCode in self.config.configurations['errordocument'].keys():
self.response.statusCode = errorCode
else:
self.response.statusCode = 500
self.response.statusMessage = self.config.configurations['errordocument'][self.response.statusCode]['msg']
eMsg = errorMessage
if eMsg == None:
eMsg = ''
else:
eMsg = eMsg + ': '
eMsg = eMsg + self.request.filepath
self.logError('%i %s: %s' % (self.response.statusCode, self.response.statusMessage, eMsg))
errorFile = self.config.virtualHosts[self.request.virtualHost]['errordocument'][self.response.statusCode]
errorRoot = self.config.virtualHosts[self.request.virtualHost]['errordocumentroot']
if errorFile != None:
errorFile = path.abspath(errorRoot + sep + errorFile)
# check if errordocument is a valid file and no other message has been set
if self.isJailedInto(errorRoot,errorFile) and os.path.isfile(errorFile):
self.request.filepath = errorFile
# determine chain of matching directories
self.determineDirectoryChain()
# check whether request is a CGI request, check documentroot and file existence
typ = self.checkRequest()
try:
if typ == -1:
self.processCGI()
return
elif typ == -2:
self.processDocument()
return
except:
if self.response.flushed:
return
# if not flushed, try to flush message or standard message (defaulttxt)
self.response.contentType = 'text/plain'
if errorMessage == None:
errorMessage = self.config.configurations['errordocument'][self.response.statusCode]['defaulttxt']
self.response.contentLength = len(errorMessage)
self.generateResponseHeaderMessage()
self.appendResponseMessageBody(errorMessage)
self.flushResponseToListener(True)
# prepares a 400 Bad Request response, showing the provided errorMessage
def setBadRequestError(self, errorMessage):
self.response.cgiHeaders = {}
self.response.statusCode = 400
self.response.statusMessage = 'Bad Request'
self.response.contentType = 'text/plain'
self.response.contentLength = len(errorMessage)
self.generateResponseHeaderMessage()
self.logError('%i %s: %s' % (self.response.statusCode, self.response.statusMessage, errorMessage))
self.response.connectionClose = True
self.appendResponseMessageBody(errorMessage)
# prepares a 500 Internal Server Error response, showing the provided errorMessage
def setInternalServerError(self, errorMessage):
self.response.cgiHeaders = {}
self.response.statusCode = 500
self.response.statusMessage = 'Internal Server Error'
self.response.contentType = 'text/plain'
self.response.contentLength = len(errorMessage)
self.generateResponseHeaderMessage()
self.logError('%i %s: %s' % (self.response.statusCode, self.response.statusMessage, errorMessage))
self.response.connectionClose = True
self.appendResponseMessageBody(errorMessage)
# sends the response message to the client
# returns true when the whole message was sent
def flushResponseToClient(self):
try:
byteswritten = self.connection.send(self.response.message)
self.response.message = self.response.message[byteswritten:]
return len(self.response.message) == 0
except:
self.response.connectionClose = True
return True
# sends a pickled request/response wrapper object to the listener process
# if closeConnection is set, that means that the connection will be closed after sending
def flushResponseToListener(self, closeConnection=False):
try:
self.response.connectionClose = closeConnection
# ofProcessor acts as a message queue if an output filter is specified
# it accumulates the response body data, to be sent in one go to the filter
if self.ofProcessor.execute():
self.connection.send(self.pickle())
self.response.flushed = True
self.response.message = ''
if closeConnection:
self.connection.close()
self.connectionClosed = True
except:
self.connection.close()
self.connectionClosed = True
# processes the request, i.e. determines whether a CGI script or a normal resource was accessed
def process (self):
# check if resource is inside the documentroot (jail)
if self.isJailedInto(self.config.virtualHosts[self.request.virtualHost]['documentroot'], self.request.filepath):
# determine chain of matching directories
self.determineDirectoryChain()
# check whether request is a CGI request, check documentroot and file existence
typ = self.checkRequest()
if typ == -1:
self.processCGI()
elif typ == -2:
self.processDocument()
else:
self.sendError(typ)
else:
self.sendError(403,'Not allowed to access resource outside documentroot')
# processes a normal resource request
def processDocument(self):
try:
# privilege separation
self.removePrivileges()
self.response.contentType = self.determineContentType()
self.response.contentLength = os.path.getsize(self.request.filepath)
self.generateResponseHeaderMessage()
# HEAD request must not have a response body, no need to access file
if self.request.method != 'HEAD':
self.accessFile(self.request.filepath)
else:
self.flushResponseToListener(True)
except:
self.sendError(500)
# accesses a resource and sends the content back to the listener in chunks of data, i.e. not all at once
# at the last "flush" the connection to the listener will be closed
def accessFile(self, filename):
f = file(filename,'r')
data = f.read(self.config.configurations['socketbuffersize'])
nextData = f.read(self.config.configurations['socketbuffersize'])
while nextData and not self.connectionClosed:
self.response.message = self.response.message + data
# flush data part to listener and keep connection open
self.flushResponseToListener()
data = nextData
nextData = f.read(self.config.configurations['socketbuffersize'])
self.response.message = self.response.message + data
# flush last data part to listener and close connection
self.flushResponseToListener(True)
f.close()
def removePrivileges(self):
st = os.stat(self.request.filepath)
# don't remove privileges if process has already limited privileges
if os.getuid() == 0:
# if the file is owned by root, try to access it as the default user
if st.st_uid == 0:
# default user
os.setgid(self.config.configurations['group'])
os.setuid(self.config.configurations['user'])
else:
# file owner user
os.setgid(st.st_gid)
os.setuid(st.st_uid)
# processes a CGI script request
def processCGI(self):
try:
self.removePrivileges()
# check whether resource is an executable file (if no cgi executor set)
if self.request.cgiExecutor == None and not os.access(self.request.filepath, os.X_OK):
self.sendError(500,'CGI Script is not accessible/executable')
return
# generate environment variables for the CGI script
self.generateCGIEnvironment()
# execute cgi script - abort timeout of n seconds
status = CGIExecutor(self).execute()
# if execution was successful and no error was sent already
if status == -1:
self.sendError(500,'CGI Script aborted because of timeout')
except:
# Exception raised by the CGI executor
self.sendError(500,'CGI script execution aborted')
# checks whether the CGI response contained the Location header and it is a local redirect response
# returns true if that is the case, otherwise false
# additionally it monitors possible endless loops that might occur if a cgi script forwards to itself
def checkReprocess(self):
#Location flag set in CGI script
if self.response.reprocess and self.response.getCGIHeader('Location') != None:
self.requestNumber = self.requestNumber + 1
# CGI local redirect response (RFC 6.2.2)
self.parseURI(self.response.getCGIHeader('Location'))
self.determineFilepath()
# check for too many recursions
if self.requestNumber > self.config.configurations['cgirecursionlimit']:
self.setInternalServerError('Recursion in CGI script')
return False
return True
else:
return False
# parses the headers of the CGI script
# returns the pair (success,cgiBody)
def parseCGIHeaders(self,document):
document = document.lstrip()
cgiBody = ''
# determine end of line character (RFC says \n, but some implementations do \r\n)
separator = '\n'
pos = document.find('\n\n')
posRN = document.find('\r\n\r\n')
if pos == -1 or posRN != -1 and pos > posRN:
pos = posRN
separator = '\r\n'
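# e.g. 'Content-Type: text/html\n\n<html>...'    -> separator '\n'
#      'Status: 404 Not Found\r\n\r\n<body>...'  -> separator '\r\n'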
header = document[:pos]
body = document[pos+len(separator)*2:]
# parse header
lines = header.split(separator)
for line in lines:
line = line.strip()
line = re.sub('\s{2,}', ' ', line)
pos = line.find(':')
if pos <= 0 or pos >= len(line)-1:
self.sendError(500,'Bad Header in CGI response')
return (False,'')
key = line[0:pos].strip()
value = line[pos+1:len(line)].strip()
self.response.setCGIHeader(key,value)
if len(self.response.cgiHeaders) == 0:
self.sendError(500,'CGI Script has no headers')
return (False,'')
location = self.response.getCGIHeader('Location')
if location == None:
# document response (RFC: 6.2.1)
if body != None and body != '':
if self.response.getCGIHeader('Content-Type') == None:
# content-type must be specified
self.sendError(500,'CGI Script must specify content type')
return (False,'')
cgiBody = body
# check for status header field
if self.response.getCGIHeader('Status') != None:
s = re.match(r'(\d+) (.*)',self.response.getCGIHeader('Status'),re.DOTALL)
if s != None:
self.response.statusCode = int(s.group(1))
self.response.statusMessage = s.group(2)
else:
# redirect response
if location.startswith('/'):
# local redirect response (RFC: 6.2.2)
self.response.reprocess = True
newEnv = {}
if self.request.cgiPathInfo != None:
newEnv['REDIRECT_URL'] = self.request.filepath[len(self.config.virtualHosts[self.request.virtualHost]['documentroot']):] + self.request.cgiPathInfo
else:
newEnv['REDIRECT_URL'] = self.request.filepath[len(self.config.virtualHosts[self.request.virtualHost]['documentroot']):]
newEnv['REDIRECT_STATUS'] = str(self.response.statusCode)
# rename CGI environment variables
for key in self.request.cgiEnv.keys():
newEnv['REDIRECT_'+key] = self.request.cgiEnv[key]
self.request.cgiEnv = newEnv
else:
# client redirect response (RFC: 6.2.3, 6.2.4)
self.response.statusCode = 302
self.response.statusMessage = 'Found'
self.response.setHeader('Location',location)
if body != None and body != '':
if self.response.getCGIHeader('Content-Type') == None:
# content-type must be specified
self.sendError(500,'CGI Script must specify content type')
return (False,'')
# success
cgiBody = body
return (True,cgiBody)
# This class provides an execution environment to the CGI script, which monitors the time it takes and aborts the script if it takes too long
class CGIExecutor():
def __init__ (self, request):
# Script process
self.process = None
# HttpRequest object
self.request = request
# executes the CGI script in a thread, which creates a new process that executes the requested scriptfile
# the thread will cause the process to terminate after a timeout
def execute (self):
# executed in a separate thread
def cgiThread():
args = [self.request.request.filepath]
if self.request.request.cgiExecutor != None:
# use executor to run script
args = [self.request.request.cgiExecutor,self.request.request.filepath]
# creates a new process, running the script
try:
self.process = subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE,env=self.request.request.cgiEnv)
# any POST data goes to stdin
if self.request.request.body != '':
self.process.stdin.write(self.request.request.body)
# fetch response blockwise and flush to listener
out = self.process.stdout.read(self.request.config.configurations['socketbuffersize'])
tmp = ''
headerParsed = False
success = True
while out != '':
nextOut = self.process.stdout.read(self.request.config.configurations['socketbuffersize'])
if not headerParsed:
tmp = tmp + out
m = re.match(r'((.+)(\r\n\r\n|\n\n))(.*)',tmp,re.DOTALL)
if m != None:
headerParsed = True
success, cgiBody = self.request.parseCGIHeaders(tmp)
if success:
self.request.generateResponseHeaderMessage()
self.request.appendResponseMessageBody(cgiBody)
self.request.flushResponseToListener(nextOut == '')
else:
if success:
self.request.appendResponseMessageBody(out)
self.request.flushResponseToListener(nextOut == '')
out = nextOut
if not self.request.response.flushed:
self.request.sendError(500,'Syntax Error in CGI Response')
errorData = self.process.communicate()[1]
# if any data is available on standard error, log it to the error log
if errorData.strip() != '':
self.request.logError(errorData)
except:
pass
thread = threading.Thread(target=cgiThread)
thread.start()
thread.join(self.request.config.configurations['cgitimeout'])
# if the thread is still alive after the timeout, the script took too long
if thread.is_alive():
self.process.terminate()
thread.join()
return -1
return 0
# executes one filter after the other
class OutputFilterProcessor:
def __init__(self, request):
self.request = request
# response message
self.message = ''
# <directory> that matched the request
self.outputFilterDirectory = None
# current filter of the filterchain
self.currentFilter = None
# current filterprocess
self.process = None
# returns just the body of the response message
def getBody(self):
pos = self.message.find('\r\n\r\n')
if pos == -1:
return ''
else:
return self.message[pos+4:]
# sets just the body of the response message
def setBody(self, body):
pos = self.message.find('\r\n\r\n')
if pos == -1:
return
else:
self.message = self.message[:pos+4] + body
# starts the output filter processing, or just returns if there is no filter specified
def execute(self):
if self.request.response.statusCode >= 400 or self.outputFilterDirectory == None:
return True
# run filter in a separate thread, so it can be killed after a timeout (if it takes too long)
def runFilter():
try:
script = self.request.config.virtualHosts[self.request.request.virtualHost]['extfilterdefine'][self.currentFilter]
self.process = subprocess.Popen(script,stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE)
# response body goes to stdin
self.process.stdin.write(self.getBody())
# the response is on standardoutput
body, errorData = self.process.communicate()
self.setBody(body)
# if any data is available on standard error, log it to the error log
if errorData != None and errorData != '':
self.request.logError(errorData.replace('\n',''))
except:
self.request.sendError(500,'Error executing filter '+self.currentFilter)
if self.request.response.connectionClose:
self.message = self.message + self.request.response.message
# all data received
# run one filter after the other
for f in self.request.config.virtualHosts[self.request.request.virtualHost]['directory'][self.outputFilterDirectory]['setoutputfilter']:
self.currentFilter = f
thread = threading.Thread(target=runFilter)
thread.start()
thread.join(self.request.config.configurations['cgitimeout'])
# if the thread is still alive after the timeout, the script took too long
if thread.is_alive():
self.process.terminate()
thread.join()
self.request.sendError(500,'Filter aborted because of timeout '+self.currentFilter)
if self.request.response.statusCode >= 400:
# break if an error occurred
return False
self.request.response.message = self.message
return True
else:
# more data to receive
self.message = self.message + self.request.response.message
return False
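# Illustrative sketch (an addition, based on how runFilter() drives the
# subprocess above): a script registered via extfilterdefine receives the
# response body on stdin and writes the filtered body to stdout. A minimal
# uppercasing filter could look like:
#
# #!/usr/bin/env python
# import sys
# sys.stdout.write(sys.stdin.read().upper())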
|
UTF-8
|
Python
| false | false | 2,012 |
18,064,632,458,383 |
63afe10dad9066a8a0d44f011fd1fdba5cdc0722
|
3aabc85bcf0026f6babc984062559f5b1dbac188
|
/src/amf/AMFXML2.py
|
a45a03e029d83c7f2c5c0c6617298da43fffd4b9
|
[] |
no_license
|
iambus/PyLoad
|
https://github.com/iambus/PyLoad
|
840e4131c24ea247886f57e6442be1f4414b0021
|
45b7de9c1002708c61029e52320de65b33be441d
|
refs/heads/master
| 2020-05-20T13:05:21.541743 | 2011-12-26T09:28:08 | 2011-12-26T09:28:08 | 3,050,975 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from AMFTypes import *
from AMFExtAlias import find_alias
from AMFXML2Ext import *
try:
from LXMLTree import LXMLTree as XMLTree
except ImportError, e:
print "[Warning] Can't use LXMLTree because of %s, use DomTree instead" % e
from DomTree import DomTree as XMLTree
# XXX: How to express 1.#QNAN and -1.#QNAN in python?
import struct
QNAN = struct.unpack('!d', '\x7f\xff\xff\xff\xff\xff\xff\xff')[0]
QNAN_ = struct.unpack('!d', '\xff\xff\xff\xff\xff\xff\xff\xff')[0]
##################################################
def decode(packet):
# raw => packet => xml
# ^^
toxml = ToXML(packet)
return toxml.get_xml()
def encode(xml):
# xml => packet => raw
# ^^
fromxml = FromXML(xml)
return fromxml.get_packet()
##################################################
# {{{ class ToXML
class ToXML:
def __init__(self, packet):
self.packet = packet
self.complex_object_set = set()
self.trait_set = set()
self.create_value_node_mappings = {
str : self.create_str_node,
unicode : self.create_str_node,
int : self.create_int_node,
float : self.create_float_node,
UNDEFINED : self.create_undefined_node,
NULL : self.create_null_node,
FALSE : self.create_false_node,
TRUE : self.create_true_node,
StrictArray : self.create_strict_array_node,
DateRef : self.create_date_node,
ObjectRef : self.create_object_node,
ArrayRef : self.create_array_node,
XMLRef : self.create_xml_node,
ByteArrayRef : self.create_byte_array_node,
}
self.to_xml()
##################################################
def to_xml(self):
self.root = XMLTree('packet')
self.set_attribute = self.root.set_attribute # not necessary, but kept for performance reasons
root_node = self.root.get_root_node()
packet = self.packet
version = self.create_child(root_node, 'version')
self.set_text(version, packet.version)
headers = self.create_child(root_node, 'headers')
for header in packet.headers:
header_node = self.create_child(headers, 'header')
self.create_text_node(header_node, 'name', header.header_name)
must_understand = self.create_child(header_node, 'must-understand')
self.set_text(must_understand, header.must_understand)
self.create_value_node(header_node, header.value)
self.complex_object_set = set()
self.trait_set = set()
messages = self.create_child(root_node, 'messages')
for message in packet.messages:
message_node = self.create_child(messages, 'message')
self.create_text_node(message_node, 'target-uri', message.target_uri)
self.create_text_node(message_node, 'response-uri', message.response_uri)
self.create_value_node(message_node, message.value)
self.complex_object_set = set()
self.trait_set = set()
##################################################
def set_text(self, node, value):
self.root.set_text(node, value)
def create_child(self, parent, tag):
return self.root.create_child(parent, tag)
def create_text_node(self, parent, tag, value):
return self.root.create_text_node(parent, tag, value)
def set_attribute(self, node, name, value):
self.root.set_attribute(node, name, value)
##################################################
def create_value_node(self, parent, value, tag = None):
t = value.__class__
funcs = self.create_value_node_mappings
assert funcs.has_key(t), 'Type %s is not supported' % t
func = funcs[t]
return func(parent, value, tag)
def create_strict_array_node(self, parent, array, tag = None):
assert isinstance(array, StrictArray)
if tag == None:
tag = 'strict-array'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', StrictArray.__name__)
for i in array.array:
self.create_value_node(node, i)
return node
def create_date_node(self, parent, dateref, tag = None):
assert isinstance(dateref, DateRef)
if tag == None:
tag = 'float'
date = dateref.date
refindex = dateref.refindex
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', date.__class__.__name__)
self.set_attribute(node, 'id', str(refindex))
self.set_text(node, date.double)
return node
def create_object_node(self, parent, objref, tag = None):
assert isinstance(objref, ObjectRef)
obj = objref.object
refindex = objref.refindex
if tag == None:
tag = obj.__class__.__name__
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', obj.__class__.__name__)
if isinstance(obj, ExtObject):
self.set_attribute(node, 'class', 'ExtObject')
self.set_attribute(node, 'id', str(refindex))
if refindex in self.complex_object_set:
# do nothing if referenced object has been defined somewhere
pass
else:
self.complex_object_set.add(refindex)
traitref = obj.trait
trait = traitref.trait
self.create_trait_node(node, traitref)
if isinstance(trait, StaticTrait):
members_node = self.create_child(node, 'members')
members = zip(trait.member_names, obj.members)
for name, value in members:
member_node = self.create_value_node(members_node, value, 'member')
self.set_attribute(member_node, 'name', name)
elif isinstance(trait, DynamicTrait):
members_node = self.create_child(node, 'members')
members = zip(trait.member_names, obj.members)
for name, value in members:
member_node = self.create_value_node(members_node, value, 'member')
self.set_attribute(member_node, 'name', name)
dynamic_members_node = self.create_child(node, 'dynamic-members')
for name, value in obj.dynamic_members:
member_node = self.create_value_node(dynamic_members_node, value, 'member')
self.set_attribute(member_node, 'name', name)
elif isinstance(trait, TraitExt):
xmler = find_xmler( trait.get_class_name() )
xmler.to_xml(self, obj, node)
else:
raise TypeError('Unknown trait type: %s' % trait.__class__)
return node
def create_trait_node(self, parent, traitref):
assert isinstance(traitref, TraitRef)
trait = traitref.trait
refindex = traitref.refindex
node = self.create_child(parent, trait.__class__.__name__)
self.set_attribute(node, 'id', str(refindex))
self.set_attribute(node, 'classname', trait.classname)
if refindex in self.trait_set:
# do nothing if the trait has been defined somewhere
pass
elif isinstance(trait, TraitExt):
# don't need to display more if it's a trait-ext
pass
else:
self.trait_set.add(refindex)
for member_name in trait.member_names:
member_node = self.create_child(node, 'member')
self.set_attribute(member_node, 'name', member_name)
return node
def create_array_node(self, parent, arrayref, tag = None):
assert isinstance(arrayref, ArrayRef)
array = arrayref.array
refindex = arrayref.refindex
if tag == None:
tag = array.__class__.__name__
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', array.__class__.__name__)
self.set_attribute(node, 'id', str(refindex))
if refindex in self.complex_object_set:
# do nothing if the array has been defined somewhere
pass
else:
self.complex_object_set.add(refindex)
list_node = self.create_child(node, 'list-items')
for i in array.list:
self.create_value_node(list_node, i)
assoc_node = self.create_child(node, 'assoc-items')
for k, v in array.assoc:
item_node = self.create_value_node(assoc_node, v, 'item')
self.set_attribute(item_node, 'key', k)
return node
def create_byte_array_node(self, parent, arrayref, tag = None):
assert isinstance(arrayref, ByteArrayRef)
array = arrayref.array
refindex = arrayref.refindex
if tag == None:
tag = array.__class__.__name__
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', array.__class__.__name__)
self.set_attribute(node, 'id', str(refindex))
if refindex in self.complex_object_set:
# do nothing if the byte-array has been defined somewhere
pass
else:
self.complex_object_set.add(refindex)
data = array.content.encode('string_escape')
self.set_attribute(node, 'length', str(len(data)))
self.set_text(node, data)
return node
def create_xml_node(self, parent, xmlref, tag = None):
assert isinstance(xmlref, XMLRef)
xml_obj = xmlref.xml
refindex = xmlref.refindex
if tag == None:
tag = xml_obj.__class__.__name__
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', xml_obj.__class__.__name__)
self.set_attribute(node, 'id', str(refindex))
if refindex in self.complex_object_set:
# do nothing if the xml has been defined somewhere
pass
else:
self.complex_object_set.add(refindex)
self.set_text(node, xml_obj.content)
return node
def create_str_node(self, parent, value, tag):
if tag == None:
tag = 'string'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'str')
self.set_text(node, value)
return node
def create_int_node(self, parent, value, tag):
if tag == None:
tag = 'integer'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'int')
self.set_text(node, value)
return node
def create_float_node(self, parent, value, tag):
if tag == None:
tag = 'float'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'float')
self.set_text(node, value)
return node
def create_undefined_node(self, parent, value, tag):
if tag == None:
tag = 'undefined'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'undefined')
return node
def create_null_node(self, parent, value, tag):
if tag == None:
tag = 'null'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'null')
return node
def create_true_node(self, parent, value, tag):
if tag == None:
tag = 'boolean'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'true')
return node
def create_false_node(self, parent, value, tag):
if tag == None:
tag = 'boolean'
node = self.create_child(parent, tag)
self.set_attribute(node, 'class', 'false')
return node
##################################################
def get_xml(self):
return self.root.tostring()
# }}}
##################################################
# {{{ class FromXML
class FromXML:
def __init__(self, xml):
self.xml = xml
self.complex_object_table = {}
self.trait_table = {}
self.get_value_mappings = {
'str' : self.get_str,
'int' : self.get_int,
'float' : self.get_float,
'undefined' : self.get_undefined,
'null' : self.get_null,
'false' : self.get_false,
'true' : self.get_true,
'StrictArray' : self.get_strict_array,
'Date' : self.get_date,
'StaticObject' : self.get_static_object,
'DynamicObject' : self.get_dynamic_object,
'ExtObject' : self.get_ext_object,
'Array' : self.get_array,
'ByteArray' : self.get_byte_array,
'XML' : self.get_xml,
}
self.from_xml()
def from_xml(self):
self.root = XMLTree.fromstring(self.xml)
root_node = self.root.get_root_node()
self.packet = AMFPacket()
packet = self.packet
version_node, headers_node, messages_node = self.get_children(root_node)
packet.version = int(self.get_text(version_node))
assert packet.version == 3
for header_node in self.get_children(headers_node):
header = HeaderType()
name_node, must_understand_node, value_node = self.get_children(header_node)
header.header_name = self.get_text(name_node)
header.must_understand = bool(self.get_text(must_understand_node))
header.value = self.get_value(value_node)
packet.headers.append(header)
self.complex_object_table = {}
self.trait_table = {}
for message_node in self.get_children(messages_node):
message = MessageType()
target_uri_node, response_uri_node, value_node = self.get_children(message_node)
message.target_uri = self.get_text(target_uri_node)
message.response_uri = self.get_text(response_uri_node)
message.value = self.get_value(value_node)
packet.messages.append(message)
self.complex_object_table = {}
self.trait_table = {}
def get_children(self, node):
return self.root.get_children(node)
def get_text(self, node):
return self.root.get_text(node)
def get_attribute(self, node, name):
return self.root.get_attribute(node, name)
def get_tag(self, node):
return self.root.get_tag(node)
##################################################
def get_value(self, node):
class_type = self.get_attribute(node, 'class')
funcs = self.get_value_mappings
assert funcs.has_key(class_type), 'unknown class %s' % class_type
func = funcs[class_type]
return func(node)
def get_str(self, node):
return self.get_text(node)
def get_int(self, node):
return int(self.get_text(node))
def get_float(self, node):
try:
float_str = self.get_text(node)
return float(float_str)
except ValueError:
if float_str == '1.#QNAN':
return QNAN
elif float_str == '-1.#QNAN':
return QNAN_
else:
raise
def get_undefined(self, node):
return UNDEFINED()
def get_null(self, node):
return NULL()
def get_false(self, node):
return FALSE()
def get_true(self, node):
return TRUE()
def get_strict_array(self, node):
items = self.get_children(node)
array = map(lambda n: self.get_value(n), items)
return StrictArray(array)
def get_static_object(self, node):
refindex = int(self.get_attribute(node, 'id'))
obj = self.get_referenced_object(node)
if obj != None:
return ObjectRef(obj, refindex)
else:
trait_node, members_node = self.get_children(node)
trait = self.get_trait(trait_node)
obj = StaticObject(trait)
self.complex_object_table[refindex] = obj
for member_node in self.get_children(members_node):
obj.members.append(self.get_value(member_node))
assert len(trait.get_member_names()) == len(obj.members)
return ObjectRef(obj, refindex)
def get_dynamic_object(self, node):
refindex = int(self.get_attribute(node, 'id'))
obj = self.get_referenced_object(node)
if obj != None:
return ObjectRef(obj, refindex)
else:
trait_node, members_node, dynamic_members_node = self.get_children(node)
trait = self.get_trait(trait_node)
obj = DynamicObject(trait)
self.complex_object_table[refindex] = obj
for member_node in self.get_children(members_node):
obj.members.append(self.get_value(member_node))
assert len(trait.get_member_names()) == len(obj.members)
for dynamic_member_node in self.get_children(dynamic_members_node):
name = self.get_attribute(dynamic_member_node, 'name')
value = self.get_value(dynamic_member_node)
obj.dynamic_members.append((name, value))
return ObjectRef(obj, refindex)
def get_ext_object(self, node):
refindex = int(self.get_attribute(node, 'id'))
obj = self.get_referenced_object(node)
if obj != None:
return ObjectRef(obj, refindex)
else:
trait_node, value_node = self.get_children(node)
trait = self.get_trait(trait_node)
ext_type = find_alias(trait.get_class_name())
obj = ext_type(trait)
self.complex_object_table[refindex] = obj
xmler = find_xmler( trait.get_class_name() )
xmler.from_xml(self, obj, value_node)
return ObjectRef(obj, refindex)
def get_referenced_object(self, node):
refindex = int(self.get_attribute(node, 'id'))
if self.complex_object_table.has_key(refindex):
obj = self.complex_object_table[refindex]
assert obj.__class__.__name__ == self.get_tag(node)
return ObjectRef(obj, refindex)
def get_trait(self, node):
node_name = self.get_tag(node)
classname = self.get_attribute(node, 'classname')
refindex = int(self.get_attribute(node, 'id'))
if self.trait_table.has_key(refindex):
trait = self.trait_table[refindex]
assert trait.classname == classname
assert trait.__class__.__name__ == node_name
return TraitRef(trait, refindex)
else:
funcs = {
'StaticTrait' : self.get_static_trait,
'DynamicTrait' : self.get_dynamic_trait,
'TraitExt' : self.get_ext_trait,
}
assert funcs.has_key(node_name), 'Unknown trait type: %s' % node_name
return funcs[node_name](node)
def get_static_trait(self, node):
'Returns a new trait-ref of static-object'
classname = self.get_attribute(node, 'classname')
refindex = int(self.get_attribute(node, 'id'))
trait = StaticTrait(classname)
self.trait_table[refindex] = trait
for member_name_node in self.get_children(node):
trait.member_names.append(self.get_attribute(member_name_node, 'name'))
return TraitRef(trait, refindex)
def get_dynamic_trait(self, node):
'Returns a new trait-ref of dynamic-object'
classname = self.get_attribute(node, 'classname')
refindex = int(self.get_attribute(node, 'id'))
trait = DynamicTrait(classname)
self.trait_table[refindex] = trait
for member_name_node in self.get_children(node):
trait.member_names.append(self.get_attribute(member_name_node, 'name'))
return TraitRef(trait, refindex)
def get_ext_trait(self, node):
'Returns a new trait-ext-ref'
classname = self.get_attribute(node, 'classname')
refindex = int(self.get_attribute(node, 'id'))
trait = TraitExt(classname)
return TraitRef(trait, refindex)
def get_array(self, node):
refindex = int(self.get_attribute(node, 'id'))
if self.complex_object_table.has_key(refindex):
array = self.complex_object_table[refindex]
return ArrayRef(array, refindex)
else:
list_node, assoc_node = self.get_children(node)
array = Array()
self.complex_object_table[refindex] = array
for item in self.get_children(list_node):
array.list.append(self.get_value(item))
for item in self.get_children(assoc_node):
name = self.get_attribute(item, 'key')
value = self.get_value(item)
array.assoc.append((name, value))
return ArrayRef(array, refindex)
def get_date(self, node):
refindex = int(self.get_attribute(node, 'id'))
if self.complex_object_table.has_key(refindex):
date = self.complex_object_table[refindex]
return DateRef(date, refindex)
else:
date = Date(float(self.get_text(node)))
self.complex_object_table[refindex] = date
return DateRef(date, refindex)
def get_xml(self, node):
refindex = int(self.get_attribute(node, 'id'))
if self.complex_object_table.has_key(refindex):
# a previously seen XML object must come back as an XMLRef, not a ByteArrayRef
return XMLRef(self.complex_object_table[refindex], refindex)
else:
xml_obj = XML(self.get_text(node))
self.complex_object_table[refindex] = xml_obj
return XMLRef(xml_obj, refindex)
def get_byte_array(self, node):
refindex = int(self.get_attribute(node, 'id'))
if self.complex_object_table.has_key(refindex):
array = self.complex_object_table[refindex]
return ByteArrayRef(array, refindex)
else:
array = ByteArray(self.get_text(node).decode('string_escape'))
self.complex_object_table[refindex] = array
return ByteArrayRef(array, refindex)
##################################################
def get_packet(self):
assert self.packet != None, "Don't call get_packet twice"
#TODO: clean the object
packet = self.packet
self.packet = None
return packet
# }}}
##################################################
if __name__ == '__main__':
from AMFDecoder import AMFDecoder
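# scratch inputs: each assignment rebinds fp, so only the last sample file is decoded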
fp = open('samples/login.txt', 'rb')
fp = open('samples/login-response.txt', 'rb')
fp = open('samples/client-ping.txt', 'rb')
fp = open('samples/client-ping-response.txt', 'rb')
fp = open('samples/9.txt', 'rb')
fp = open('samples/7.txt', 'rb')
fp = open('samples/blazeds-3.txt', 'rb')
decoder = AMFDecoder(fp)
packet = decoder.decode()
toxml = ToXML(packet)
xml = toxml.get_xml()
print xml
fromxml = FromXML(xml)
packet = fromxml.get_packet()
print packet
# vim: foldmethod=marker:
|
UTF-8
|
Python
| false | false | 2,011 |
14,001,593,385,035 |
0afc633146c47fa4475e52c09c818b8d7b2e0ca2
|
28845c45c725c183908aea3775aa865646c6a376
|
/python-lounge/setup.py
|
d4935866040316ee0c222818221fb794a2665922
|
[
"Apache-2.0"
] |
permissive
|
mikeyk/couchdb-lounge
|
https://github.com/mikeyk/couchdb-lounge
|
005536dc0542b0262ac600f6b9719ba86a966ebe
|
ef6e97547c1e64230d804439b6b2f99f83eef747
|
refs/heads/master
| 2020-04-10T22:46:45.663415 | 2010-02-23T21:52:00 | 2010-02-23T21:52:00 | 404,320 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
py_packages = ['lounge', 'lounge.client']
description = 'lounge python module'
long_description = 'nice interface to lounge configuration'
setup( version = '1.3.9',
description = description,
long_description = long_description,
name = 'python-lounge',
author='meebo',
author_email='shaun@meebo.com',
url='http://tilgovi.github.com/couchdb-lounge/',
packages = py_packages)
|
UTF-8
|
Python
| false | false | 2,010 |
9,036,611,236,806 |
3a3ecb2184a35d475d4ef32df75497d1025dd20a
|
f60f228f78724495643206d8a9c216323bdf0a29
|
/amsn2/core/views/contactview.py
|
dc0dd5787e3c40d2a5a6b4358837b7e1b2d5531a
|
[] |
no_license
|
wallace88/amsn2
|
https://github.com/wallace88/amsn2
|
b87c060bf3e40211850d8da7c3fe7e1b6285bc3c
|
6cd9fa229a3e28ab3b94473e0100f153b43e2c41
|
refs/heads/master
| 2020-12-25T07:12:02.172517 | 2008-10-09T20:47:27 | 2008-10-09T20:47:27 | 61,328 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from base import BaseUIView
class ContactView (BaseUIView):
def __init__(self, uid):
BaseUIView.__init__(self, uid)
self.icon = None
self.dp = None
self.emblem = None
self.name = None
self.pymsn_contact = None
@staticmethod
def getContact(uid):
contact = BaseUIView.getView(uid)
if contact is None:
return ContactView(uid)
else:
return contact
|
UTF-8
|
Python
| false | false | 2,008 |
14,688,788,190,086 |
092e662975a6ca84ec4045517099361ad9405bee
|
63e87071885ab1b8c4fbb9788fa5373df6eac870
|
/CSE-231/proj05/proj05library.py
|
8ed1a13108c5538054c9f07652d1a36df5707c24
|
[] |
no_license
|
HmmmQuestionMark/Homework
|
https://github.com/HmmmQuestionMark/Homework
|
c9424d16a2fb41d63bcb245bf9ecbd9698ccba87
|
6d3ebed1553e11f1b659ef5470c09b0f497c4cad
|
refs/heads/master
| 2017-12-02T20:27:06.262126 | 2014-04-22T19:49:12 | 2014-04-22T19:49:12 | 15,770,611 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
###########################################################
# Computer Project #5 Library
#
# Library of useful string related functions.
#
###########################################################
# global constants
ASCII_LOWERCASE = "abcdefghijklmnopqrstuvwxyz"
ASCII_UPPERCASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
DECIMAL_DIGITS = "0123456789"
def is_alpha(string):
""""
Is alpha (contained in the ASCII letterspace).
@summary Check each char in a string for if it is contained
in either the uppercase or lowercase ASCII letterspace.
@param string The string being checked.
@return Is alpha.
"""
# loop through the chars
# a for loop is easier than a while loop in this case
for char in string:
# check against known ASCII letters
if not char in ASCII_LOWERCASE and not char in ASCII_UPPERCASE:
return False
return True
def is_digit(string):
""""
Is digit (contained in the decimal digit constant).
@summary Check each char in a string for if it is contained
in the DECIMAL_DIGITS constant.
@param string The string being checked.
@return Is digit.
"""
# loop through the chars
# a for loop is easier than a while loop in this case
for char in string:
# check against all digits
if not char in DECIMAL_DIGITS:
return False
return True
def to_lower(string):
""""
Change all letters in a string to lowercase.
@summary Find uppercase letters, replace the char with the
lowercase equivalent.
@param string The string being transformed.
@return Lowercase string.
"""
# set working variables
count = 0
end_str = string
# loop through the chars
# a for loop is easier than a while loop in this case
for char in string:
# if the char is uppercase, replace it with the related lowercase letter
if char in ASCII_UPPERCASE:
end_str = end_str[:count] + ASCII_LOWERCASE[ord(char) - 65] + end_str[count + 1:]
count += 1
# return the transformed string
return end_str
def to_upper(string):
""""
Change all letters in a string to uppercase.
@summary Find lowercase letters, replace the char with the
uppercase equivalent.
@param string The string being transformed.
@return Uppercase string.
"""
# set working variables
count = 0
end_str = string
# loop through the chars
# a for loop is easier than a while loop in this case
for char in string:
# if the char is lowercase, replace it with the related uppercase letter
if char in ASCII_LOWERCASE:
end_str = end_str[:count] + ASCII_UPPERCASE[ord(char) - 97] + end_str[count + 1:]
count += 1
# return the transformed string
return end_str
def find_chr(string, char):
""""
Find a character's first index in a string.
@summary Loop through each char in a string, when a match
is found return that index.
@note If the char is not found, return -1.
@param string The string being looped through.
@param char The char being searched for.
@return The lowest index where char can be found.
"""
# set working variable
count = 0
# loop through the chars
# a for loop is easier than a while loop in this case
for found_char in string:
# did we find the char?
if found_char == char:
# yes! return the index
return count
count += 1
# couldn't find the char
return -1
def find_str(string, query):
""""
Find a string's first index in another string.
@summary Loop through each char in a string, when a match is
found for the full query string return the lowest index.
@note If the query is not found, return -1.
@param string The string being looped through.
@param query The string being searched for.
@return The lowest index where query can be found.
"""
# set working variables
count = 0
# loop as long as the count is an index for the string
# a while loop is easier than a for loop in this case
while count < len(string):
# set working variable for query string
query_count = 0
# loop as long as the calculated index is still in the string
while count + query_count < len(string):
# does the char match?
if string[count + query_count] == query[query_count]:
# yes, it does match!
# is this the end of the query string?
if query_count + 1 == len(query):
# yep! return the current string count index
return count
query_count += 1
else:
# nope.
# start over since this char isn't part of the query
break
count += 1
# couldn't find the query string inside of the string
return -1
def replace_chr(string, find, replace):
""""
Replace all occurrences of a char inside of a string.
@summary Find all occurrences of a char in the given string,
then replace with a different given char.
@param string The string being transformed.
@param find The char being found.
@param replace The replacement char.
@return The transformed string.
"""
# set working variables
count = 0
end_str = string
# loop through the chars
# a for loop is easier than a while loop in this case
for char in string:
# if char is the one we want to replace, replace it
if char == find:
end_str = end_str[:count] + replace + end_str[count + 1:]
count += 1
# return the transformed string
return end_str
def replace_str(string, find, replace):
""""
Replace all occurrences of a string inside of a string.
@summary Find all occurrences of a string in the given
string, then replace with a different given string.
@param string The string being transformed.
@param find The string being found.
@param replace The replacement string.
@return The transformed string.
"""
# set working variables
pieces = []
working_piece = string
# loop as long as we can still find the query string
# a while loop is easier than a for loop in this case
while find_str(working_piece, find) != -1:
index = find_str(working_piece, find)
# split the string into pieces, but don't add the 'find' string
pieces.append(working_piece[:index])
# add the replace string to the pieces instead
pieces.append(replace)
# redefine the working piece for the next execution of the loop
working_piece = working_piece[index + len(find):]
# the loop is done, so add the rest of the string to the pieces
pieces.append(working_piece)
# rebuild the string from the pieces
end_str = ""
for piece in pieces:
end_str += piece
# return the new string
return end_str
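# Usage sketch (an addition, not part of the original project library):
if __name__ == '__main__':
print find_str("hello world", "world") # -> 6
print replace_chr("banana", "a", "o") # -> bonono
print replace_str("one two two", "two", "2") # -> one 2 2
print to_upper("MixedCase123") # -> MIXEDCASE123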
|
UTF-8
|
Python
| false | false | 2,014 |
17,033,840,333,998 |
f677f322dbb26b4e4c403ba54156f89013f13e1f
|
bda77b66d03a22f261dab048cab6d2cea8ef8553
|
/ex16.py
|
651db13944ff9b48e08e16e4c946ffd75bc9092f
|
[] |
no_license
|
jiri-jagos/lpthw
|
https://github.com/jiri-jagos/lpthw
|
112d5979b1b4712a45ec47fc2b0b3d185b9e75da
|
89364b67e76ea435d155d8be4e531a1f61272a1b
|
refs/heads/master
| 2021-03-12T23:25:08.289286 | 2012-06-03T22:34:54 | 2012-06-03T22:34:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
from sys import argv
script, sFilename = argv
print "We're gonna erase %r" % sFilename
print "Press <Enter> to confirm, <Ctrl> + C to abort"
raw_input("?")
print "Opening the file %r." % sFilename
oFile = open(sFilename, 'w')
print "Truncating the file. Say bye to its content."
oFile.truncate()
print "Now you can insert 3 lines of text:"
sLine1 = raw_input("line 1:\n")
sLine2 = raw_input("line 2:\n")
sLine3 = raw_input("line 3:\n")
print "Now i'll write these lines into the file"
# the lines are joined into a single string before being written to the file
sContent = "%s\n%s\n%s\n" % (sLine1, sLine2, sLine3)
oFile.write(sContent)
oFile.close()
oFile = open(sFilename)
print "File content is now:\n %s" % oFile.read()
print "Now we can close the file"
oFile.close()
|
UTF-8
|
Python
| false | false | 2,012 |
6,940,667,188,564 |
c84bb8d1a505ba0f0feeb9f26c0b089770121d9c
|
0d901449543395c71a1a0fdf4224333220560fd0
|
/p9.py
|
3a7236f315ff58252b87b30eebdd025643417cc2
|
[] |
no_license
|
jacobcvt12/project_euler
|
https://github.com/jacobcvt12/project_euler
|
32f4fde474b449e833ca7748f97bb7fab18df195
|
ae98ac46a4568d92d885c11f07aaeaa24b6d939c
|
refs/heads/master
| 2021-01-22T07:10:15.181309 | 2014-01-15T17:25:41 | 2014-01-15T17:25:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from math import sqrt
for a in range(1, 501):
for b in range(a+1, 501):
c = sqrt((a**2) + (b**2))
if int(c) == c:
if (a+b+c) == 1000:
print "a: %d, b: %d, c: %d, abc: %d" % (a, b, c, (a*b*c))
|
UTF-8
|
Python
| false | false | 2,014 |
3,599,182,644,033 |
d97c750cc9ea49d2f182d2f13970e800a65d865e
|
1716fe675182941cea9e8ea02426e31f7d2c2572
|
/analysis/calculate_age.py
|
ca309d4f7287a12b0e1c2a961889fd11b2aa7487
|
[] |
no_license
|
BrodrickChilds/prisoner
|
https://github.com/BrodrickChilds/prisoner
|
871c0bb441d974eb64de3c896aa727c337f1747d
|
533a8736ae7a93e35929ad946fd3e5f5d7b0a816
|
refs/heads/master
| 2021-01-25T08:48:09.750745 | 2013-07-11T23:58:15 | 2013-07-11T23:58:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from datetime import date
def calculate_age(born):
yearborn = int(born[:4])
today = date.today()
return today.year - yearborn
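# Usage sketch (an addition): calculate_age is a year-only approximation that
# ignores month and day; it only assumes the first four characters of 'born'
# are the year, e.g. an ISO-style date string.
if __name__ == '__main__':
print calculate_age("1990-05-17")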
|
UTF-8
|
Python
| false | false | 2,013 |
14,319,420,973,865 |
4501a0a94f78f837c191cb9d21d250133877829e
|
6298f1e0407df2c93390ef9154c4f5410ed121a0
|
/server/GAE-REST-2/wifi-location/src/requests.py
|
4d3aca49cf676b4580699da7c2f1315de23cc8e4
|
[] |
no_license
|
MikeKlemarewski/WifiLocator
|
https://github.com/MikeKlemarewski/WifiLocator
|
5eb35c06023e34bf244a82838976fb5321a8c4a4
|
472ff85770691d135d0727bf166525d7be8d4d84
|
refs/heads/master
| 2020-12-29T02:54:30.408573 | 2012-06-07T04:24:47 | 2012-06-07T04:24:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import cgi
import os
import datetime
import urllib
import wsgiref.handlers
import csv
import rest
import logging
from google.appengine.runtime import apiproxy_errors
from django.utils import simplejson as json
from datetime import datetime
from src.models import *
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
def sendFriendRequest(self,json_obj):
try:
user_obj = Users.get_by_id(int(json_obj["user_id"]))
friend_obj = Users.get_by_id(int(json_obj["friend_id"]))
#user can't add itself
if (json_obj["user_id"]) == (json_obj["friend_id"]) :
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : "unknown", "status" : 4}))
return
#check if user or friend is valid
elif not user_obj :
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : "unknown", "status" : 1}))
return
elif not friend_obj:
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : "unknown", "status" : 2}))
return
#check if the request already exists
q = db.GqlQuery(("SELECT * FROM FriendRequests " + "WHERE user_id = :1 and friend_id = :2" ), int(json_obj["friend_id"]),int(json_obj["user_id"]))
if q.count() > 0:
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : q[0].key().id(), "status" : 3}))
return
p = db.GqlQuery(("SELECT * FROM FriendRequests " + "WHERE user_id = :1 and friend_id = :2" ), int(json_obj["user_id"]),int(json_obj["friend_id"]))
if p.count() > 0:
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : p[0].key().id(), "status" : 3}))
return
k = db.GqlQuery(("SELECT * FROM Friends " + "WHERE user = :1 and friend_id = :2" ), Users.get_by_id(int(json_obj["user_id"])),int(json_obj["friend_id"]))
if k.count() > 0:
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : "unknown", "status" : 5}))
return
#sends request
request = FriendRequests(user_id = int(json_obj["friend_id"]), friend_id = int(json_obj["user_id"]))
request.put()
#self.response.out.write("request_sent")
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"request_id" : request.key().id(), "status" : 0}))
except apiproxy_errors.OverQuotaError, message:
logging.error(message)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"status" : 10}))
def getFriendRequests(self, json_obj):
try:
user_obj = Users.get_by_id(int(json_obj["user_id"]))
data = dict()
data["requests"] = []
if not user_obj :
#data["Requests"].append({"request_id" : "unknown"})
data["status"] = 1
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps(data))
return
q = db.GqlQuery(("SELECT * FROM FriendRequests " + "WHERE user_id = :1"), int(json_obj["user_id"]))
if q.count() == 0:
data["status"] = 2
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps(data))
return
for requests in q:
friend = Users.get_by_id(requests.friend_id)
friend_first_name = friend.first_name
friend_last_name = friend.last_name
data["requests"].append({
'first_name' : friend_first_name,
'last_name' : friend_last_name,
'request_id' : requests.key().id()})
data["status"] = 0
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps(data))
except apiproxy_errors.OverQuotaError, message:
logging.error(message)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps({"status" : 10}))
def getEvents(self, json_obj):
data = dict()
user_obj = Users.get_by_id(int(json_obj["user_id"]))
zones = db.GqlQuery(("SELECT * FROM BSSIDZones " +
"WHERE mac_address = :1"), urllib.unquote_plus(json_obj["mac_address"]))
if zones.count() == 0:
#BSSID UNKNOWN
data["status"] = 1
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps(data))
return
data["events"] = []
curr_zone = zones[0].zones
user_obj.last_location = curr_zone.key()
user_obj.put()
logging.debug("user information updated")
logging.debug("location: " + curr_zone.zone_name)
try:
curr_super_zone = curr_zone.super_zone
for events in curr_super_zone.event_super_zone:
#TODO: return super zone map
data["events"].append({'name' : events.name,
'organizer' : events.organizer,
'location' : events.super_zone.super_zone_name,
'start_time' : datetime.ctime(events.start_time),
'end_time' : datetime.ctime(events.end_time),
'location' : events.location,
'description' : events.description})
data["status"] = 0
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.dumps(data))
except:
logging.debug("cant not get events")
|
UTF-8
|
Python
| false | false | 2,012 |
4,836,133,180,875 |
f5334e378f5d234a87194844209233033da57c20
|
99d7a6448a15e7770e3b6f3859da043300097136
|
/src/lasers/power/power_map.py
|
074c20bb557eb146f6acbc5276273a9c9952de4e
|
[] |
no_license
|
softtrainee/arlab
|
https://github.com/softtrainee/arlab
|
125c5943f83b37bc7431ae985ac7b936e08a8fe4
|
b691b6be8214dcb56921c55daed4d009b0b62027
|
refs/heads/master
| 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
#============= standard library imports ========================
#============= local library imports ==========================
from src.managers.data_managers.h5_data_manager import H5DataManager
from src.lasers.power.power_map_processor import PowerMapProcessor
def show():
pmp = PowerMapProcessor()
reader = H5DataManager()
p = '/Users/ross/Sandbox/powermap.h5'
reader.open_data(p)
# if data.endswith('.h5') or data.endswith('.hdf5'):
# reader = self._data_manager_factory()
# reader.open_data(data)
# else:
# with open(data, 'r') as f:
# reader = csv.reader(f)
# #trim off header
# reader.next()
#
graph = pmp.load_graph(reader)
graph.configure_traits()
# self.graph.width = 625
# self.graph.height = 500
# reader.open_data(data)
# z, _ = pmp._extract_h5(reader)
# if self.surf:
# self.graph3D.plot_data(z, func='surf',
# representation=self.representation,
# warp_scale=self.vertical_ex ,
# outline=self.surf_outline
# )
# if self.contour:
# self.graph3D.plot_data(z, func='contour_surf',
# contours=self.levels,
# warp_scale=self.vertical_ex,
# outline=self.contour_outline
# )
if __name__ == '__main__':
show()
#============= EOF =============================================
|
UTF-8
|
Python
| false | false | 2,013 |
2,740,189,156,248 |
2da10aff1ec5156bad2f1baac408bd0bc8f0eead
|
bab7841cf78f77c067c39fbd262d84fcdb4d72f8
|
/frontend/models/operation.py
|
8ccfa559574c28ee75dfebf85452957ff41c678f
|
[
"MIT"
] |
permissive
|
lijm1206/Bongos
|
https://github.com/lijm1206/Bongos
|
1b4f4cc171513c3bfb459c2fa80929d6674e5f8f
|
d89eb4b30b779704253bf48e794deb65d71850b8
|
refs/heads/master
| 2021-05-28T08:55:51.438205 | 2013-08-16T15:26:31 | 2013-08-16T15:26:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Ruoyan Wong(@saipanno).
#
# Created at 2013/01/16.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from frontend.extensions.database import db
class OperationDb(db.Model):
"""
DATE: time.strftime('%Y-%m-%d %H:%M')
STATUS: 0: 队列中
1: 成功
2: 错误
5: 执行中
other: 异常错误
"""
__tablename__ = 'operations'
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))
datetime = db.Column(db.String(50))
operation_type = db.Column(db.String(25))
server_list = db.Column(db.Text)
script_template = db.Column(db.Text)
ext_variables = db.Column(db.Text)
ssh_config = db.Column(db.Integer)
status = db.Column(db.Integer)
result = db.Column(db.Text)
def __init__(self, author_id, operation_type, server_list, script_template, ext_variables, ssh_config, status, result):
self.author_id = author_id
self.datetime = time.strftime('%Y-%m-%d %H:%M')
self.operation_type = operation_type
self.server_list = server_list
self.script_template = script_template
self.ssh_config = ssh_config
self.ext_variables = ext_variables
self.status = status
self.result = result
|
UTF-8
|
Python
| false | false | 2,013 |
197,568,524,235 |
02d66fb888d5d71487a3e79c0fa295f6f101f3ae
|
6de13fbb1e7a9775bb872575d3fdb9cf2d56f019
|
/other/backprob2.py
|
e21faca72b51e3a608ef89b72321bc65a99600db
|
[
"MIT"
] |
permissive
|
PirosB3/WorldMood
|
https://github.com/PirosB3/WorldMood
|
7637e30a5a422c1d8820f7fd9ab8a69b652dcfe3
|
bf1abd0c04c05fa916ac6d6e9142d1549f3340b3
|
refs/heads/master
| 2020-04-12T05:10:07.716830 | 2014-07-09T10:23:10 | 2014-07-09T10:23:10 | 14,931,903 | 2 | 3 | null | false | 2014-03-18T20:08:22 | 2013-12-04T18:19:15 | 2014-03-18T20:08:22 | 2014-03-18T20:08:22 | 84,212 | 0 | 0 | 0 |
Python
| null | null |
import math
import random
N_BIAS = 1
def sigmoid(x):
return math.tanh(x)
def dsigmoid(y):
return 1.0 - y**2
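# note: dsigmoid takes the *output* y = tanh(x), using d/dx tanh(x) = 1 - tanh(x)**2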
class NeuralNetwork(object):
def buildWeights(self, n1, n2, fill=None):
res = []
for x in range(n1):
inner = []
for z in range(n2):
inner.append(fill or random.uniform(-0.2, 0.2))
res.append(inner)
return res
def __init__(self, n_input, n_hidden, n_output):
# Define layer sizes (the input layer gains a bias unit)
self.n_input = n_input + N_BIAS
self.n_output = n_output
self.n_hidden = n_hidden
# Build outputs
self.out_input = [1.0]*self.n_input
self.out_hidden = [1.0]*self.n_hidden
self.out_output = [1.0]*self.n_output
# Build weights
self.w_input_hidden = self.buildWeights(self.n_input, self.n_hidden)
self.w_hidden_output = self.buildWeights(self.n_hidden, self.n_output)
# Track changes
self.change_input_hidden = self.buildWeights(self.n_input, self.n_hidden, 0.0)
self.change_hidden_output = self.buildWeights(self.n_hidden, self.n_output, 0.0)
def back_propagate(self, targets, N, M):
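# N is the learning rate; M is the momentum factor applied to the previous weight change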
output_deltas = [0] * self.n_output
hidden_deltas = [0] * self.n_hidden
# Calculate output deltas
for o in range(self.n_output):
error = targets[o] - self.out_output[o]
output_deltas[o] = dsigmoid(self.out_output[o]) * error
# Calculate hidden deltas
for h in range(self.n_hidden):
error = 0.0
for o in range(self.n_output):
error += output_deltas[o] * self.w_hidden_output[h][o]
hidden_deltas[h] = dsigmoid(self.out_hidden[h]) * error
# Update hidden_output weights
for h in range(self.n_hidden):
for o in range(self.n_output):
change = output_deltas[o] * self.out_hidden[h]
self.w_hidden_output[h][o] += N*change \
+ M*self.change_hidden_output[h][o]
self.change_hidden_output[h][o] = change
# Update input_hidden weights
for i in range(self.n_input):
for h in range(self.n_hidden):
change = hidden_deltas[h] * self.out_input[i]
self.w_input_hidden[i][h] += N*change + M*self.change_input_hidden[i][h]
self.change_input_hidden[i][h] = change
# Calculate error
error = 0.0
for k in range(len(targets)):
error = error + 0.5*(targets[k]-self.out_output[k])**2
return error
def update(self, inputs):
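# forward pass: copy the inputs, then propagate activations input -> hidden -> output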
for i in range(self.n_input - N_BIAS):
self.out_input[i] = inputs[i]
for h in range(self.n_hidden):
res = [self.w_input_hidden[i][h] * self.out_input[i] for i in range(self.n_input)]
self.out_hidden[h] = sigmoid(sum(res))
for o in range(self.n_output):
res = [self.w_hidden_output[h][o] * self.out_hidden[h] for h in range(self.n_hidden)]
self.out_output[o] = sigmoid(sum(res))
return self.out_output
def train(self, patterns, iterations=100000, N=0.5, M=0.5):
for _ in xrange(iterations):
error = 0.0
for inp, expected in patterns:
self.update(inp)
error += self.back_propagate(expected, N, M)
if _ % 100 == 0:
print('error %-.5f' % error)
def test(self, patterns):
for pattern, expected in patterns:
print "%s -> %s" % (pattern, self.update(pattern))
def main():
pattern = [
[[0,0], [0]],
[[0,1], [1]],
[[1,0], [1]],
[[1,1], [1]]
]
#pattern = [
#[[0,0], [0]],
#[[0,1], [1]],
#[[1,0], [1]],
#[[1,1], [0]]
#]
n = NeuralNetwork(2, 4, 1)
n.train(pattern)
n.test(pattern)
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 2,014 |
7,060,926,256,575 |
95a0da857e03b404cd2f835165b9935f85a64801
|
2316d0c8950153f993e59cc8d90904fabecf3ce7
|
/tests/correct3/subscript1.py
|
365b976eee286f2f7ce272f5c5a8962c5d4fe713
|
[] |
no_license
|
SsnL/compyler
|
https://github.com/SsnL/compyler
|
6efb1f9a09a3915faf75b2e9def426080f488454
|
fd05dc49babd90b4953088aadb026885bc8a3388
|
refs/heads/master
| 2020-04-05T22:49:55.377343 | 2013-12-14T00:57:31 | 2013-12-14T00:57:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# basic subscription list: tests subscription on lists
print [1,2,3,4,5][2]
|
UTF-8
|
Python
| false | false | 2,013 |
15,960,098,486,317 |
17d7a3f25ad39a90df521e95ab8f365d3420f992
|
c9c3b544948de4cc9ac1e6989814b3474ff620fe
|
/clustering/device_to_device/cosine_similarity/cosine_similarity.py
|
3a9da167a0355e90b67f3531aa937929227b9f45
|
[] |
no_license
|
rahulramakrishnan/app-recommender-system
|
https://github.com/rahulramakrishnan/app-recommender-system
|
5fc19c5befff09c88b7f84dfb658406f875ba0aa
|
a2674b2ce83e996e81766b5be7fa12d10bf6861c
|
refs/heads/master
| 2016-07-28T02:09:29.850586 | 2014-05-09T01:17:02 | 2014-05-09T01:17:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Create the similarity dictionary
#which will be used in
#device_kmeans.py
#Two bugs we have identified:
#1. This didn't take multiple dimensions into account
#2. There are > 3000 dimensions, so it did not cluster
import random
import math
import json
#Load the device_bit_vectors dictionary
'''
{ "device_id" : "1303032001...",
"device_id2": "0030056300..." }
'''
d = open('./output_txt_files/device_input_vectors.txt', 'r')
device_ids = json.load(d)
def cosineSimilarity(A, B):
return numerator(A,B)/denominator(A,B)
def numerator(A, B):
C = zip(A,B)
numerator = sum(map(lambda pair: pair[0] * pair[1], C))
return float(numerator)
def denominator(A,B):
final_A = math.sqrt(sum(map(lambda x: x**2, A)))
final_B = math.sqrt(sum(map(lambda x: x**2, B)))
# cosine similarity divides by the product of the two norms (the original summed them)
denominator = final_A * final_B
return float(denominator)
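# Sanity-check sketch (an addition; note the module-level load above expects
# ./output_txt_files/device_input_vectors.txt to exist before import):
if __name__ == '__main__':
print cosineSimilarity([1, 0], [1, 0]) # identical vectors -> 1.0
print cosineSimilarity([1, 0], [0, 1]) # orthogonal vectors -> 0.0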
|
UTF-8
|
Python
| false | false | 2,014 |
9,448,928,053,544 |
5d93debebe653e178765610ce38ed3c898bcd8a2
|
5356c4cbee695b963b849ae93644753868013af7
|
/src/AgregatedEthernet.py
|
5b2151fbacddc82101cfe25aab7b8294b0f8db5d
|
[] |
no_license
|
dynamicdeploy/packet_synthesier
|
https://github.com/dynamicdeploy/packet_synthesier
|
4191e648e5f042a25ad350da0d0a789c32646f55
|
8b7b281531245cbad13d03ba62eab8a807c1e985
|
refs/heads/master
| 2020-12-11T07:50:51.368882 | 2013-02-13T01:38:48 | 2013-02-13T01:38:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on Dec 9, 2012
@author: oleg
'''
from AbstractPacket import AbstractPacket
import imp
class AgregatedEthernet(AbstractPacket):
'''
classdocs
'''
def __init__(self, context):
self.__eth0PacketGenerator = None
self.__eth1PacketGenerator = None
self.__999PacketGenerator = None
self.__defaults = {'dst_mac_level0' : '00:50:43:00:00:01',
'src_mac_level0' : '00:0f:fe:91:fe:d1',
'ether_type_level0' : '0x9201',
'ether_tag_level0' : '',
'preamble_999' : "55:55:55:55:55:55:55:D5",
'sid999' : '0xE001',
'tag999' : '',
'dst_mac_level1' : '00:03:19:00:03:DE',
'src_mac_level1' : 'FF:FF:FF:FF:FF:FF',
'ether_type_level1' : '0x0806',
'ether_tag_level1' : '',
'ether_payload_level1' : "\x80" * 2}
context.update(self.__defaults)
self.__context = context
self.__ethLevel0Context = {}
self.__ethLevel1Context = {}
self.__999Context = {}
def getShortDescription(self):
return ""
def getOptions(self):
return ""
def getFullDescription(self):
return ""
def getDefaults(self):
return ""
def setOption( self, key, value ):
pass
def __loadEthernetModuleLevel0(self):
fp, pathname, description = imp.find_module('EthernetPacket')
try:
module = imp.load_module('EthernetPacket', fp, pathname, description)
packetGenerator = getattr(module, 'EthernetPacket')
self.__eth0PacketGenerator = packetGenerator(self.__ethLevel0Context)
print 'EthernetPacket' + " loaded"
finally:
if fp:
fp.close()
def __loadEthernetModuleLevel1(self):
fp, pathname, description = imp.find_module('EthernetPacket')
try:
module = imp.load_module('EthernetPacket', fp, pathname, description)
packetGenerator = getattr(module, 'EthernetPacket')
self.__eth1PacketGenerator = packetGenerator(self.__ethLevel1Context)
print 'EthernetPacket' + " loaded"
finally:
if fp:
fp.close()
def __load999Module(self):
fp, pathname, description = imp.find_module('Packet_999')
try:
module = imp.load_module('Packet_999', fp, pathname, description)
packetGenerator = getattr(module, 'Packet_999')
self.__999PacketGenerator = packetGenerator(self.__999Context)
print 'Packet_999' + " loaded"
finally:
if fp:
fp.close()
def generatePacket(self):
self.__loadEthernetModuleLevel0()
self.__loadEthernetModuleLevel1()
self.__load999Module()
self.__ethLevel1Context.update({'src_mac' : self.__context['src_mac_level1'],
'dst_mac' : self.__context['dst_mac_level1'],
'ether_type' : self.__context['ether_type_level1'],
'tag' : self.__context['ether_tag_level1'],
'payload' : self.__context['ether_payload_level1']})
ethPacketLevel1 = self.__eth1PacketGenerator.generatePacket()
self.__999Context.update({'preamble' : self.__context['preamble_999'],
'sid' : self.__context['sid999'],
'tag' : self.__context['tag999'],
'payload' : ethPacketLevel1})
Packet999 = self.__999PacketGenerator.generatePacket()
self.__ethLevel0Context.update({'src_mac' : self.__context['src_mac_level0'],
'dst_mac' : self.__context['dst_mac_level0'],
'ether_type' : self.__context['ether_type_level0'],
'tag' : self.__context['ether_tag_level0'],
'payload' : Packet999})
ethPacketLevel0 = self.__eth0PacketGenerator.generatePacket()
return ethPacketLevel0
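# Usage sketch (an addition; assumes the EthernetPacket and Packet_999 modules
# are importable, as imp.find_module requires). Note that __init__ calls
# context.update(self.__defaults), so the defaults overwrite any caller-supplied
# keys of the same name:
# generator = AgregatedEthernet({})
# raw_frame = generator.generatePacket()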
|
UTF-8
|
Python
| false | false | 2,013 |
6,158,983,131,270 |
ef8545875cdc654f60e6d7cf5553631c50d660cc
|
c0c8c59430d204ffe2005942a3d0a798f2a5b6c2
|
/nodes/feature_saver.py
|
6a2c127006e5d07fd28f068565fc7530621eee3b
|
[] |
no_license
|
yetigeti/slytherin_dagger
|
https://github.com/yetigeti/slytherin_dagger
|
942c809618cbd796e08ec5ca9e7b94083cb255c6
|
9a12cd92c9f884c25764139f3efdb8535484cc29
|
refs/heads/master
| 2020-04-07T18:46:21.973988 | 2014-12-12T04:34:24 | 2014-12-12T04:34:24 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
'''
Simple utility to convert images to features using feature generator node and recorded bag file
'''
import roslib
roslib.load_manifest('slytherin_dagger')
import rospy
import sys
sys.path.append(roslib.packages.get_pkg_dir('slytherin_dagger')+'/src')
import feature_generator as feature
from geometry_msgs.msg import Twist
from std_msgs.msg import Empty
from sensor_msgs.msg import Image
from std_msgs.msg import Float32MultiArray
from cv_bridge import CvBridge, CvBridgeError
import cv2
import rosbag
import os
import shlex
from subprocess import call # used below when reindexing unindexed bag files
import numpy as np
if __name__ == '__main__':
rospy.init_node('feature_saver')
rospy.loginfo('started feature saver')
list_of_files = rospy.get_param('~list_of_files')
path_bag = rospy.get_param('~bag_folder')
camera_topic = rospy.get_param('~camera_topic', default='/camera/image_raw')
joy_topic = rospy.get_param('~joy_topic', default='/cmd_vel')
record_topic = rospy.get_param('~record_topic', default='/record')
bridge = CvBridge()
f = open(list_of_files, 'r')
# load all bags in f
counter = 0
for line in f:
# open the current bag file
line2 = line.rstrip(' \t\n')
rospy.loginfo("[DAgger] Opening bag file %s", path_bag + line2)
try:
bag = rosbag.Bag(path_bag + line2)
except rosbag.bag.ROSBagUnindexedException:
rospy.loginfo("[DAgger] Unindexed Bag file %s. Attempting to reindex", path_bag + line2)
call(shlex.split("rosbag reindex %s" % (path_bag + line2)))
try:
bag = rosbag.Bag(path_bag + line2)
rospy.loginfo("[DAgger] Reindexing Succesful")
except rosbag.bag.ROSBagUnindexedException:
rospy.loginfo("[DAgger] Reindexing failed, skipping file %s", path_bag + line2)
continue
name, ext = os.path.splitext(line2)
write_bag = rosbag.Bag(path_bag + name + '_featured' + ext, 'w')
# look at msg in dagger_record topic
camera_msg = None
joy_msg = None
for topic, msg, t in bag.read_messages(topics=[camera_topic, joy_topic]):
if topic == camera_topic:
camera_msg = msg
if topic == joy_topic:
joy_msg = msg
if camera_msg is not None and joy_msg is not None:
#generate features
#convert msg.data to a numpy array
ar = np.array(bridge.imgmsg_to_cv2(camera_msg,desired_encoding='passthrough'), dtype=np.uint8)
bw_img = cv2.cvtColor(ar, cv2.COLOR_RGB2GRAY)
feature_msg = Float32MultiArray()
yaw = np.array(joy_msg.linear.x, dtype=np.float32)
pitch = np.array(joy_msg.linear.y, dtype=np.float32)
feature_msg.data = np.append(feature.findholecentre(bw_img), (yaw, pitch, yaw, pitch))
write_bag.write(camera_topic, camera_msg, t)
write_bag.write(joy_topic, joy_msg,t)
write_bag.write(record_topic, feature_msg, t)
camera_msg = None
joy_msg = None
bag.close()
write_bag.close()
f.close()
|
UTF-8
|
Python
| false | false | 2,014 |
9,680,856,296,198 |
be7471d72864b66b9f665946d5fea1f089e1dd79
|
7b4d088bd18f83b78d35762ae486582c546928c1
|
/cdc/app/theme/middlewares.py
|
1e27639559a8c23a5ed0accc7e7f97ea3c766e5f
|
[
"MIT"
] |
permissive
|
indexofire/cdc
|
https://github.com/indexofire/cdc
|
7a7fdf4ff04a6a27fc870e1ef63f0e9e2c607322
|
541abf4ee9fb0910b33df78d2e863d227b0c7cae
|
refs/heads/master
| 2020-03-29T09:40:57.937724 | 2013-12-03T15:08:57 | 2013-12-03T15:08:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from cdc.app.theme.default import Theme, ThemesManager
from cdc.app.theme.models import UserTheme
from cdc.app.theme.utils import monkey_patch_template_loaders
logger = logging.getLogger('cdc_themes')
class ThemesMiddleware(object):
themes_manager = ThemesManager()
def __init__(self):
themes = getattr(settings, 'THEMES', None)
if not themes:
logger.warning("There is no themes specified. Themes middleware will be disabled.")
raise MiddlewareNotUsed()
for theme in themes:
self.themes_manager.add_theme(Theme(**theme))
default_theme = getattr(settings, 'DEFAULT_THEME', 0)
self.themes_manager.set_default(default_theme)
if not getattr(settings, 'THEMES_USE_TEMPLATE_LOADERS', False):
monkey_patch_template_loaders()
def process_request(self, request):
settings.request_handler = request
request.theme = self.themes_manager.default
if request.user.is_authenticated():
try:
request.theme = self.themes_manager.get_theme(UserTheme.objects.get(user=request.user).theme)
except UserTheme.DoesNotExist:
pass
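# Example configuration (illustrative only -- the Theme(**theme) keyword names
# below are hypothetical, since the Theme class is defined elsewhere):
# THEMES = [
#     {'name': 'default', 'template_dir': 'themes/default'},
#     {'name': 'dark', 'template_dir': 'themes/dark'},
# ]
# DEFAULT_THEME = 0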
|
UTF-8
|
Python
| false | false | 2,013 |
5,102,421,194,720 |
c319c6190d5206b8d4e38eab9fb5115c0af892fc
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/page2docbookplugin/page2docbook/pagetodocbook/pagetodocbook.py
|
39a8c0fdb69a01687c63ecb8707c033bff63cad5
|
[] |
no_license
|
woochica/trachacks
|
https://github.com/woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from trac.core import *
from trac.util import escape
from trac.mimeview.api import IContentConverter
from trac.wiki.formatter import wiki_to_html
import os
import re
from cStringIO import StringIO
import urllib
from tidy import parseString
import libxml2
import libxslt
from pkg_resources import resource_filename
class PageToDocbookPlugin(Component):
"""Convert Wiki pages to docbook."""
implements(IContentConverter)
    def wiki_to_docbook(self, wikitext, env, req, db=None, absurls=False, escape_newlines=False):
        # NB: depends on a DocbookFormatter class that is neither defined nor
        # imported in this module; convert_content below does not call this.
if not wikitext:
return ""
out = StringIO()
DocbookFormatter(env, req, absurls, db).format(wikitext, out, escape_newlines)
return out.getvalue()
# IContentConverter methods
def get_supported_conversions(self):
yield ('docbook', 'Docbook', 'docbook', 'text/x-trac-wiki', 'application/docbook+xml', 7)
def convert_content(self, req, input_type, source, output_type):
#extract all data resources
datadir = resource_filename(__name__, 'data')
html = wiki_to_html(source, self.env, req)
options = dict(output_xhtml=1, add_xml_decl=1, indent=1, tidy_mark=0, input_encoding='utf8', output_encoding='utf8', doctype='auto', wrap=0, char_encoding='utf8')
xhtml = parseString(html.encode("utf-8"), **options)
xhtml2dbXsl = u"""<?xml version="1.0"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:import href=\"file:///""" + urllib.pathname2url(resource_filename(__name__, 'data/html2db/html2db.xsl')) + """\" />
<xsl:output method="xml" indent="no" encoding="utf-8"/>
<xsl:param name="document-root" select="'chapter'"/>
</xsl:stylesheet>
"""
normalizedHeadingsXsl = u"""<?xml version="1.0"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:import href=\"file:///""" + urllib.pathname2url(resource_filename(__name__, 'data/headingsNormalizer/headingsNormalizer.xsl')) + """\" />
<xsl:output method="xml" indent="no" encoding="utf-8"/>
<xsl:param name="defaultTopHeading" select="'""" + req.path_info[6:] + """'"/>
</xsl:stylesheet>
"""
xhtml_xmldoc = libxml2.parseDoc(str(xhtml))
normalizedHeadingsXsl_xmldoc = libxml2.parseDoc(normalizedHeadingsXsl)
normalizedHeadingsXsl_xsldoc = libxslt.parseStylesheetDoc(normalizedHeadingsXsl_xmldoc)
xhtml2_xmldoc = normalizedHeadingsXsl_xsldoc.applyStylesheet(xhtml_xmldoc, None)
nhstring = normalizedHeadingsXsl_xsldoc.saveResultToString(xhtml2_xmldoc)
xhtml2dbXsl_xmldoc = libxml2.parseDoc(xhtml2dbXsl)
xhtml2dbXsl_xsldoc = libxslt.parseStylesheetDoc(xhtml2dbXsl_xmldoc)
docbook_xmldoc = xhtml2dbXsl_xsldoc.applyStylesheet(xhtml2_xmldoc, None)
dbstring = xhtml2dbXsl_xsldoc.saveResultToString(docbook_xmldoc)
xhtml_xmldoc.freeDoc()
normalizedHeadingsXsl_xsldoc.freeStylesheet()
xhtml2dbXsl_xsldoc.freeStylesheet()
xhtml2_xmldoc.freeDoc()
docbook_xmldoc.freeDoc()
return (dbstring, 'text/plain') #application/docbook+xml
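# --- Hedged deployment note (not part of the plugin) ---
# Trac switches components on per environment in trac.ini. Assuming this
# module ships in a package named 'pagetodocbook' (inferred from the file
# path, not confirmed), an entry like the following would enable it:
#
# [components]
# pagetodocbook.* = enabled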
|
UTF-8
|
Python
| false | false | 2,013 |
8,031,588,883,862 |
bb0e72f30eccb935c44291a3959ae3f434fe6ebf
|
fd90e966f07e78f45f039b204f1d032bd55469a0
|
/admin/rccps/rccpsHPDZCZModify.py
|
17f8964903ee504b7b61817510921b3f3fdba22f
|
[] |
no_license
|
simonliuyl/AFA1.0
|
https://github.com/simonliuyl/AFA1.0
|
882c3d8e4a8c7b28cbedc7e57fa577de2667ea47
|
9a3cabd7886904321d09431030eb72918d39999e
|
refs/heads/master
| 2021-01-01T18:33:56.298377 | 2014-01-11T15:35:16 | 2014-01-11T15:35:16 | 15,824,844 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: gbk -*-
################################################################################
# Rural credit banking (Nongxinyin) system: scheduled-job class --
# bank draft reconciliation error-account handling
#===============================================================================
# Transaction file: rccpsHPDZCZModify.py
# Company:          Beijing Agree Technology Co., Ltd.
# Author:           Guan Binjie
# Last modified:    2008-06-27
################################################################################
import TradeContext
TradeContext.sysType = 'cron'
import AfaLoggerFunc,AfaUtilTools,AfaDBFunc,TradeFunc,AfaFlowControl,os,AfaFunc,sys,AfaHostFunc,time
from types import *
from rccpsConst import *
import rccpsCronFunc,rccpsState,rccpsDBFunc,rccpsHostFunc,rccpsFunc,rccpsGetFunc
import rccpsDBTrcc_mbrifa,rccpsDBTrcc_hpdzcz,rccpsDBTrcc_sstlog,rccpsDBTrcc_bilbka,rccpsDBTrcc_hpdzmx
import rccpsMap0000Dhpdzmx2CTradeContext,rccpsMap0000Dbilbka2CTradeContext,rccpsMap0000Dbilinf2CTradeContext,rccpsMap1113CTradeContext2Dbilbka
if __name__ == '__main__':
try:
AfaLoggerFunc.tradeInfo("***农信银系统: 系统调度类.汇票对账错账处理[rccpsHPDZCZModify]进入***")
#==========获取中心日期================================================
AfaLoggerFunc.tradeInfo(">>>开始获取前中心工作日期")
mbrifa_where_dict = {}
mbrifa_where_dict['OPRTYPNO'] = "20"
mbrifa_dict = rccpsDBTrcc_mbrifa.selectu(mbrifa_where_dict)
if mbrifa_dict == None:
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
rccpsCronFunc.cronExit("S999","查询当前中心日期异常")
NCCWKDAT = mbrifa_dict['NOTE1'][:8] #对账日期
LNCCWKDAT = "('" + mbrifa_dict['NOTE4'].replace(",","','") + "')"
AfaLoggerFunc.tradeInfo(">>>结束获取前中心工作日期")
#================往账行内有,中心无======================================
AfaLoggerFunc.tradeInfo(">>>开始处理往账行内有,中心无类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '01' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型账务处理由主机方调账,本系统设置交易状态为抹账成功,成功后修改错账处理标识为已处理")
for i in xrange(len(hpdzcz_list)):
#========设置状态为长款=========================================
AfaLoggerFunc.tradeInfo("开始修改原交易状态为长款")
TradeContext.BESBNO = PL_BESBNO_BCLRSB
TradeContext.BETELR = PL_BETELR_AUTO
#========设置交易状态为长款成功=================================
if not rccpsState.newTransState(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],PL_BCSTAT_LONG,PL_BDWFLG_SUCC):
rccpsCronFunc.cronExit('S999', '设置长款成功状态异常')
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
                    rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo("结束修改原交易状态为长款")
#========修改错账处理标识为已处理===============================
hpdzcz_update_dict = {}
hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
hpdzcz_where_dict = {}
hpdzcz_where_dict['BJEDTE'] = hpdzcz_list[i]['BJEDTE']
hpdzcz_where_dict['BSPSQN'] = hpdzcz_list[i]['BSPSQN']
ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
if ret <= 0:
AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理往账行内有,中心无类型")
#================往账行内无,中心有======================================
AfaLoggerFunc.tradeInfo(">>>开始处理往账行内无,中心有类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '02' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型为系统异常,需科技人员查实后处理")
#for i in xrange(len(hpdzcz_list)):
# hpdzcz_update_dict = {}
# hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
#
# hpdzcz_where_dict = {}
# hpdzcz_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
# hpdzcz_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
# hpdzcz_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
#
# ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
#
# if ret <= 0:
# AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
# rccpsCronFunc.cronExit("S999","修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理往账行内无,中心有类型")
#================往账行内清算,中心未清算================================
AfaLoggerFunc.tradeInfo(">>>开始处理往账行内清算,中心未清算类型")
hpdzcz_where_sql = "nccwkdat = '" + NCCWKDAT + "' and eactyp = '03' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型为系统异常,需科技人员查实后处理")
for i in xrange(len(hpdzcz_list)):
#========设置状态为长款=========================================
AfaLoggerFunc.tradeInfo("开始修改原交易状态为长款")
TradeContext.BESBNO = PL_BESBNO_BCLRSB
TradeContext.BETELR = PL_BETELR_AUTO
#========设置交易状态为长款成功=================================
if not rccpsState.newTransState(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],PL_BCSTAT_LONG,PL_BDWFLG_SUCC):
rccpsCronFunc.cronExit('S999', '设置长款成功状态异常')
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
                    rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo("结束修改原交易状态为长款")
#========修改错账处理标识为已处理===============================
hpdzcz_update_dict = {}
hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
hpdzcz_where_dict = {}
hpdzcz_where_dict['BJEDTE'] = hpdzcz_list[i]['BJEDTE']
hpdzcz_where_dict['BSPSQN'] = hpdzcz_list[i]['BSPSQN']
ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
if ret <= 0:
AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理往账行内清算,中心未清算类型")
#================往账行内未清算,中心清算================================
AfaLoggerFunc.tradeInfo(">>>开始处理往账行内未清算,中心清算类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '04' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型账务处理由主机方处理,本系统设置交易状态为清算\短款成功,成功后修改错账处理标识为已处理")
for i in xrange(len(hpdzcz_list)):
tmp_stat_where_dict = {}
tmp_stat_where_dict['BJEDTE'] = hpdzcz_list[i]['BJEDTE']
tmp_stat_where_dict['BSPSQN'] = hpdzcz_list[i]['BSPSQN']
tmp_stat_where_dict['BCSTAT'] = PL_BCSTAT_ACC
tmp_stat_where_dict['BDWFLG'] = PL_BDWFLG_SUCC
tmp_stat_dict = {}
tmp_stat_dict = rccpsDBTrcc_sstlog.selectu(tmp_stat_where_dict)
if tmp_stat_dict == None:
                    rccpsCronFunc.cronExit('S999','查询交易记账状态异常')
#if not rccpsState.getTransStateSet(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],PL_BCSTAT_ACC,PL_BDWFLG_SUCC,tmp_stat_dict):
HPSTAT = ''
if hpdzcz_list[i]['TRCCO'] == '2100001':
#汇票签发
HPSTAT = PL_HPSTAT_SIGN
elif hpdzcz_list[i]['TRCCO'] == '2100100':
#汇票解付
HPSTAT = PL_HPSTAT_PAYC
elif hpdzcz_list[i]['TRCCO'] == '2100101':
#汇票撤销
HPSTAT = PL_HPSTAT_CANC
elif hpdzcz_list[i]['TRCCO'] == '2100102':
#汇票挂失
HPSTAT = PL_HPSTAT_HANG
elif hpdzcz_list[i]['TRCCO'] == '2100103':
#汇票退票
HPSTAT = PL_HPSTAT_RETN
elif hpdzcz_list[i]['TRCCO'] == '2100104':
#汇票解挂
HPSTAT = PL_HPSTAT_DEHG
#业务状态无记账状态而且非汇票挂失和汇票解挂交易
if len(tmp_stat_dict) <= 0 and hpdzcz_list[i]['TRCCO'] != '2100102' and hpdzcz_list[i]['TRCCO'] != '2100104':
AfaLoggerFunc.tradeInfo("此交易非汇票挂失\解挂交易,且未记账成功")
AfaLoggerFunc.tradeInfo("设短款")
#========设短款============================================
TradeContext.BESBNO = PL_BESBNO_BCLRSB
TradeContext.BETELR = PL_BETELR_AUTO
AfaLoggerFunc.tradeInfo(">>>开始设置短款处理中状态")
#========设置交易状态为短款处理中==========================
if not rccpsState.newTransState(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],PL_BCSTAT_SHORT,PL_BDWFLG_WAIT):
rccpsCronFunc.cronExit('S999', '设置短款处理中状态异常')
AfaLoggerFunc.tradeInfo(">>>结束设置短款处理中状态")
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
                        rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo(">>>开始设置汇票状态")
if not rccpsState.newBilState(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],HPSTAT):
rccpsCronFunc.cronExit('S999', '设置汇票状态异常')
AfaLoggerFunc.tradeInfo(">>>结束设置汇票状态")
AfaLoggerFunc.tradeInfo(">>>开始设置短款成功状态")
tmp_stat = {}
tmp_stat['BJEDTE'] = hpdzcz_list[i]['BJEDTE']
tmp_stat['BSPSQN'] = hpdzcz_list[i]['BSPSQN']
tmptrc_dict = {}
if not rccpsDBFunc.getTransBil(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],tmptrc_dict):
rccpsCronFunc.cronExit('S999', '查询汇票交易业务信息异常')
if not rccpsDBFunc.getInfoBil(tmptrc_dict['BILVER'],tmptrc_dict['BILNO'],tmptrc_dict['BILRS'],tmptrc_dict):
rccpsCronFunc.cronExit('S999', '查询汇票信息异常')
TradeContext.SBAC = TradeContext.BESBNO + PL_ACC_NXYDQSWZ #借农信银待清算往账
TradeContext.ACNM = "农信银待清算往账"
TradeContext.RBAC = tmptrc_dict['PYHACC'] #贷持票人账号
TradeContext.OTNM = tmptrc_dict['PYHNAM']
#=====开始调函数拼贷方账号第25位校验位====
TradeContext.SBAC = rccpsHostFunc.CrtAcc(TradeContext.SBAC, 25)
tmp_stat['BCSTAT'] = PL_BCSTAT_SHORT
tmp_stat['BDWFLG'] = PL_BDWFLG_SUCC
if not rccpsState.setTransState(tmp_stat):
rccpsCronFunc.cronExit('S999', '设置短款成功状态异常')
AfaLoggerFunc.tradeInfo(">>>结束设置短款成功状态")
else:
AfaLoggerFunc.tradeInfo("补清算")
#========补清算============================================
TradeContext.BESBNO = PL_BESBNO_BCLRSB
TradeContext.BETELR = PL_BETELR_AUTO
#========设置交易状态为清算处理中==========================
AfaLoggerFunc.tradeInfo("开始设置清算处理中状态")
if not rccpsState.newTransState(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],PL_BCSTAT_MFESTL,PL_BDWFLG_SUCC):
rccpsCronFunc.cronExit('S999', '设置清算处理中状态异常')
AfaLoggerFunc.tradeInfo("结束设置清算处理中状态")
#COMMIT
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
                        rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo(">>>开始设置汇票状态")
if not rccpsState.newBilState(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],HPSTAT):
rccpsCronFunc.cronExit('S999', '设置汇票状态异常')
AfaLoggerFunc.tradeInfo(">>>结束设置汇票状态")
AfaLoggerFunc.tradeInfo(">>>开始设置清算成功状态")
tmp_stat = {}
tmp_stat['BJEDTE'] = hpdzcz_list[i]['BJEDTE']
tmp_stat['BSPSQN'] = hpdzcz_list[i]['BSPSQN']
tmp_stat['BCSTAT'] = PL_BCSTAT_MFESTL
tmp_stat['BDWFLG'] = PL_BDWFLG_SUCC
if not rccpsState.setTransState(tmp_stat):
rccpsCronFunc.cronExit('S999', '设置清算成功状态异常')
AfaLoggerFunc.tradeInfo(">>>结束设置清算成功状态")
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
                        rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
#========修改错账处理标识为已处理==============================
hpdzcz_update_dict = {}
hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
hpdzcz_where_dict = {}
hpdzcz_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
hpdzcz_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
hpdzcz_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
if ret <= 0:
AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理往账行内未清算,中心清算类型")
#================来账行内有,中心无=====================================
AfaLoggerFunc.tradeInfo(">>>开始处理来账行内有,中心无类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '05' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型为系统异常,需科技人员查实后处理")
#for i in xrange(len(hpdzcz_list)):
# hpdzcz_update_dict = {}
# hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
#
# hpdzcz_where_dict = {}
# hpdzcz_where_dict['BJEDTE'] = hpdzcz_list[i]['BJEDTE']
# hpdzcz_where_dict['BSPSQN'] = hpdzcz_list[i]['BSPSQN']
#
# ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
#
# if ret <= 0:
# AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
# rccpsCronFunc.cronExit("S999","修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理来账行内有,中心无类型")
#================来账行内无,中心有=====================================
AfaLoggerFunc.tradeInfo(">>>开始处理来账行内无,中心有类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '06' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型需补来账,成功后修改错账处理标识为已处理")
for i in xrange(len(hpdzcz_list)):
#========补来账================================================
AfaLoggerFunc.tradeInfo(">>>开始补来账")
#========初始化来账上下文======================================
AfaLoggerFunc.tradeInfo(">>>开始初始化上下文")
TradeContext.tradeResponse=[]
hpdzmx_where_dict = {}
hpdzmx_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
hpdzmx_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
hpdzmx_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
hpdzmx_dict = rccpsDBTrcc_hpdzmx.selectu(hpdzmx_where_dict)
if hpdzmx_dict == None:
rccpsCronFunc.cronExit("S999","查询来账明细数据异常")
if len(hpdzmx_dict) <= 0:
rccpsCronFunc.cronExit("S999","登记簿中无此来账明细数据")
rccpsMap0000Dhpdzmx2CTradeContext.map(hpdzmx_dict)
TradeContext.OCCAMT = str(TradeContext.OCCAMT)
TradeContext.TemplateCode = 'RCC005'
TradeContext.BRSFLG = PL_BRSFLG_RCV
TradeContext.CUR = '01'
TradeContext.BILRS = '0'
TradeContext.TransCode = '1113'
TradeContext.OPRNO = PL_HPOPRNO_JF
#=====================获取系统日期时间=========================
TradeContext.BJEDTE = AfaUtilTools.GetHostDate( )
#TradeContext.TRCDAT = AfaUtilTools.GetHostDate( )
TradeContext.BJETIM = AfaUtilTools.GetSysTime( )
#TradeContext.BJEDTE = PL_BJEDTE #测试,暂时使用
#TradeContext.TRCDAT = PL_BJEDTE #测试,暂时使用
#=====================系统公共校验=============================
if not rccpsFunc.ChkPubInfo(PL_BRSFLG_RCV) :
raise Exception
#=====================机构合法性校验===========================
if not rccpsFunc.ChkUnitInfo( PL_BRSFLG_RCV ) :
raise Exception
#=====================获取中心日期=============================
TradeContext.NCCworkDate = TradeContext.NCCWKDAT
#=====================获取平台流水号===========================
if rccpsGetFunc.GetSerialno(PL_BRSFLG_RCV) == -1 :
raise Exception
#=====================获取中心流水号===========================
if rccpsGetFunc.GetRccSerialno( ) == -1 :
raise Exception
AfaLoggerFunc.tradeInfo(">>>结束初始化上下文")
#=====================登记来账信息=============================
AfaLoggerFunc.tradeInfo(">>>开始登记来账信息")
#=====================币种转换=================================
if TradeContext.CUR == 'CNY':
TradeContext.CUR = '01'
#=====================开始向字典赋值===========================
bilbka_dict = {}
if not rccpsMap1113CTradeContext2Dbilbka.map(bilbka_dict):
rccpsCronFunc.cronExit('M999', '字典赋值出错')
bilbka_dict['DCFLG'] = PL_DCFLG_CRE #借贷标识
bilbka_dict['OPRNO'] = TradeContext.OPRNO #业务属性
#=====================开始插入数据库===========================
if not rccpsDBFunc.insTransBil(bilbka_dict):
rccpsCronFunc.cronExit('D002', '插入数据库异常')
#=====================commit操作===============================
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeFatal( AfaDBFunc.sqlErrMsg )
rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo('插入汇兑业务登记簿成功')
#=====================设置状态为收妥===========================
sstlog = {}
sstlog['BSPSQN'] = TradeContext.BSPSQN
sstlog['BJEDTE'] = TradeContext.BJEDTE
sstlog['BCSTAT'] = PL_BCSTAT_BNKRCV
sstlog['BDWFLG'] = PL_BDWFLG_SUCC
#=====================设置状态为 收妥-成功=====================
if not rccpsState.setTransState(sstlog):
rccpsCronFunc.cronExit(TradeContext.errorCode, TradeContext.errorMsg)
#=====================commit操作===============================
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeFatal( AfaDBFunc.sqlErrMsg )
rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo(">>>结束登记来账信息")
AfaLoggerFunc.tradeInfo(">>>结束补来账")
#========更新汇票对账明细登记簿================================
AfaLoggerFunc.tradeInfo(">>>开始更新汇兑明细登记簿")
hpdzmx_where_dict = {}
hpdzmx_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
hpdzmx_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
hpdzmx_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
stat_dict = {}
if not rccpsState.getTransStateCur(TradeContext.BJEDTE,TradeContext.BSPSQN,stat_dict):
rccpsCronFunc.cronExit(TradeContext.errorCode,TradeContext.errorMsg)
hpdzmx_update_dict = {}
hpdzmx_update_dict['BJEDTE'] = TradeContext.BJEDTE
hpdzmx_update_dict['BSPSQN'] = TradeContext.BSPSQN
hpdzmx_update_dict['BCSTAT'] = stat_dict['BCSTAT']
hpdzmx_update_dict['BDWFLG'] = stat_dict['BDWFLG']
ret = rccpsDBTrcc_hpdzmx.updateCmt(hpdzmx_update_dict,hpdzmx_where_dict)
if ret <= 0:
AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","登记汇兑对账明细登记簿交易行内信息异常")
AfaLoggerFunc.tradeInfo(">>>结束更新汇兑明细登记簿")
#========修改错账类型为行内未记账,中心清算=====================
hpdzcz_update_dict = {}
hpdzcz_update_dict['EACTYP'] = '08'
hpdzcz_update_dict['EACINF'] = '来账行内未记账,中心清算'
hpdzcz_update_dict['BJEDTE'] = TradeContext.BJEDTE
hpdzcz_update_dict['BSPSQN'] = TradeContext.BSPSQN
hpdzcz_where_dict = {}
hpdzcz_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
hpdzcz_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
hpdzcz_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
if ret <= 0:
AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","修改汇兑对账错账登记簿处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理来账行内无,中心有类型")
#================来账行内记账,中心未清算===============================
AfaLoggerFunc.tradeInfo(">>>开始处理来账行内记账,中心未清算类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '07' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型为系统异常,需科技人员查实后处理")
#for i in xrange(len(hpdzcz_list)):
# hpdzcz_update_dict = {}
# hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
#
# hpdzcz_where_dict = {}
# hpdzcz_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
# hpdzcz_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
# hpdzcz_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
#
# ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
#
# if ret <= 0:
# AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
# rccpsCronFunc.cronExit("S999","修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束处理来账行内记账,中心未清算类型")
#================来账行内未记账,中心清算===============================
AfaLoggerFunc.tradeInfo(">>>开始处理来账行内未记账,中心清算类型")
hpdzcz_where_sql = "nccwkdat in " + LNCCWKDAT + " and eactyp = '08' and isdeal = '" + PL_ISDEAL_UNDO + "'"
hpdzcz_list = rccpsDBTrcc_hpdzcz.selectm(1,0,hpdzcz_where_sql,"")
if hpdzcz_list == None:
AfaLoggerFunc.tradeInfo(AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","查询此错账类型相关记录异常")
elif len(hpdzcz_list) <= 0:
AfaLoggerFunc.tradeInfo("无此错账类型相关记录")
else:
AfaLoggerFunc.tradeInfo("此错账类型需补挂账,成功后修改错账处理标识为已处理")
for i in xrange(len(hpdzcz_list)):
#============补记账最多补三次==================================
j = 0 #计数器初始化
while 1 == 1:
j = j + 1 #计数器加1
#========初始化数据========================================
AfaLoggerFunc.tradeInfo(">>>开始初始化挂账数据")
trc_dict = {}
if not rccpsDBFunc.getTransBil(hpdzcz_list[i]['BJEDTE'],hpdzcz_list[i]['BSPSQN'],trc_dict):
rccpsCronFunc.cronExit("S999","查询此交易相关信息异常")
if not rccpsMap0000Dbilbka2CTradeContext.map(trc_dict):
rccpsCronFunc.cronExit("S999","将交易信息赋值到TradeContext异常")
if not rccpsDBFunc.getInfoBil(TradeContext.BILVER,TradeContext.BILNO,TradeContext.BILRS,trc_dict):
rccpsCronFunc.cronExit("S999","查询此交易相关汇票信息异常")
if not rccpsMap0000Dbilinf2CTradeContext.map(trc_dict):
rccpsCronFunc.cronExit("S999","将汇票信息赋值到TradeContext异常")
TradeContext.BJETIM = AfaUtilTools.GetSysTime( )
TradeContext.BEAUUS = TradeContext.BEAUUS
TradeContext.BEAUPS = TradeContext.BEAUPS
TradeContext.OCCAMT = str(TradeContext.OCCAMT)
TradeContext.NCCworkDate = TradeContext.NCCWKDAT
AfaLoggerFunc.tradeInfo(">>>结束初始化挂账数据")
#========来账,补账=========================================
AfaLoggerFunc.tradeInfo('>>>来账行内未清算,中心清算,自动挂账')
AfaLoggerFunc.tradeInfo("汇票解付来账,自动入账")
TradeContext.HostCode = '8813' #调用8813主机接口
TradeContext.BCSTAT = PL_BCSTAT_AUTO #自动入账
TradeContext.BDWFLG = PL_BDWFLG_WAIT #处理中
#====拼借贷方账户====
TradeContext.SBAC = TradeContext.BESBNO + PL_ACC_HCHK #借方账户
TradeContext.SBAC = rccpsHostFunc.CrtAcc(TradeContext.SBAC, 25)
TradeContext.RBAC = TradeContext.BESBNO + PL_ACC_NXYDQSLZ #贷方账号
TradeContext.RBAC = rccpsHostFunc.CrtAcc(TradeContext.RBAC, 25)
TradeContext.REAC = TradeContext.BESBNO + PL_ACC_NXYDXZ #挂账账户
TradeContext.REAC = rccpsHostFunc.CrtAcc(TradeContext.REAC, 25)
AfaLoggerFunc.tradeInfo( '借方账号1:' + TradeContext.SBAC )
AfaLoggerFunc.tradeInfo( '贷方账号1:' + TradeContext.RBAC )
AfaLoggerFunc.tradeInfo( '挂账账号1:' + TradeContext.REAC )
AfaLoggerFunc.tradeInfo(">>>开始判断是否存在多余款操作")
#=====判断记账次数====
#关彬捷 20080913 增加实际结算金额摘要代码
TradeContext.RCCSMCD = PL_RCCSMCD_HPJF #摘要代码
if float(TradeContext.RMNAMT) != 0.00:
AfaLoggerFunc.tradeInfo(">>>第二次记账赋值操作")
TradeContext.ACUR = '2' #记账循环次数
TradeContext.TRFG = '9' #凭证处理标识'
TradeContext.I2CETY = '' #凭证种类
TradeContext.I2TRAM = str(TradeContext.RMNAMT) #结余金额
TradeContext.I2SMCD = PL_RCCSMCD_HPJF #摘要代码
TradeContext.I2RBAC = TradeContext.BESBNO + PL_ACC_DYKJQ #贷方账号
TradeContext.I2SBAC = TradeContext.BESBNO + PL_ACC_HCHK #借方账号
TradeContext.I2REAC = TradeContext.BESBNO + PL_ACC_NXYDXZ #挂账账号
#=====生成账号校验位====
TradeContext.I2SBAC = rccpsHostFunc.CrtAcc(TradeContext.I2SBAC,25)
TradeContext.I2RBAC = rccpsHostFunc.CrtAcc(TradeContext.I2RBAC,25)
TradeContext.I2REAC = rccpsHostFunc.CrtAcc(TradeContext.I2REAC,25)
AfaLoggerFunc.tradeInfo( '借方账号2:' + TradeContext.I2SBAC )
AfaLoggerFunc.tradeInfo( '贷方账号2:' + TradeContext.I2RBAC )
AfaLoggerFunc.tradeInfo( '挂账账号2:' + TradeContext.I2REAC )
AfaLoggerFunc.tradeInfo(">>>结束判断是否存在多余款操作")
#======================修改登记簿中交易机构号为当前机构号======
AfaLoggerFunc.tradeInfo(">>>开始更新汇兑业务登记簿交易机构号")
bilbka_update_dict = {}
bilbka_update_dict['BESBNO'] = TradeContext.BESBNO
bilbka_where_dict = {}
bilbka_where_dict['BJEDTE'] = TradeContext.BJEDTE
bilbka_where_dict['BSPSQN'] = TradeContext.BSPSQN
ret = rccpsDBTrcc_bilbka.update(bilbka_update_dict,bilbka_where_dict)
if ret <= 0:
rccpsCronFunc.cronExit('S999','更新汇兑业务登记簿中机构号异常')
AfaLoggerFunc.tradeInfo(">>>结束更新汇兑业务登记簿交易机构号")
#======================新增sstlog表状态记录====================
AfaLoggerFunc.tradeInfo(">>>开始新增交易状态")
if not rccpsState.newTransState(TradeContext.BJEDTE,TradeContext.BSPSQN,TradeContext.BCSTAT,TradeContext.BDWFLG):
rccpsCronFunc.cronExit(TradeContext.errorCode, TradeContext.errorMsg)
AfaLoggerFunc.tradeInfo(">>>结束新增交易状态")
#======================commit操作==============================
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeFatal( AfaDBFunc.sqlErrMsg )
rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
#=====================与主机通讯===============================
rccpsHostFunc.CommHost(TradeContext.HostCode)
#=========开始向状态字典赋值===================================
stat_dict = {}
stat_dict['BJEDTE'] = TradeContext.BJEDTE #交易日期
stat_dict['BSPSQN'] = TradeContext.BSPSQN #报单序号
stat_dict['BJETIM'] = TradeContext.BJETIM #交易时间
stat_dict['BESBNO'] = TradeContext.BESBNO #机构号
stat_dict['BETELR'] = TradeContext.BETELR #柜员号
stat_dict['SBAC'] = TradeContext.SBAC #借方账号
stat_dict['RBAC'] = TradeContext.REAC #贷方账号
stat_dict['MGID'] = TradeContext.errorCode #主机返回代码
stat_dict['STRINFO'] = TradeContext.errorMsg #主机返回信息
stat_dict['NOTE3'] = ""
#=========判断主机返回结果=================================
if TradeContext.errorCode == '0000':
AfaLoggerFunc.tradeInfo("来账补记账成功,设置状态为自动入账\自动挂账成功")
stat_dict['BCSTAT'] = TradeContext.BCSTAT #流水状态
stat_dict['BDWFLG'] = PL_BDWFLG_SUCC #流转处理标识
stat_dict['TRDT'] = TradeContext.TRDT #主机日期
stat_dict['TLSQ'] = TradeContext.TLSQ #主机流水
#====若主机记账成功,但返回代销账序号不为空,则设置业务状态为挂账===
if TradeContext.existVariable('DASQ'):
if TradeContext.DASQ != '':
AfaLoggerFunc.tradeInfo("主机记账成功,但返回代销账序号非空,设置业务状态为挂账成功")
TradeContext.BCSTAT = PL_BCSTAT_HANG
stat_dict['DASQ'] = TradeContext.DASQ #销账序号
stat_dict['NOTE3'] = "主机方挂账"
else:
AfaLoggerFunc.tradeInfo("来账补记账失败,设置状态为自动入账\自动挂账失败")
stat_dict['BCSTAT'] = TradeContext.BCSTAT #流水状态
stat_dict['BDWFLG'] = PL_BDWFLG_FAIL #流转处理标识
#=========设置状态=========================================
if not rccpsState.setTransState(stat_dict):
rccpsCronFunc.cronExit('S999', '设置状态异常')
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
                    rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
#========若主机返回失败,睡眠五分钟,进入下一次补记账========
if TradeContext.errorCode != '0000':
if j < 3:
AfaLoggerFunc.tradeInfo("第[" + str(j) + "]次来账补记账失败,睡眠五分钟,进入下一次补记账")
time.sleep(300)
continue
else:
AfaLoggerFunc.tradeInfo("第[" + str(j) + "]次来账补记账失败,停止此来账业务补记账,进入下一来账业务补记账")
#======置未完成标识为True==========================
uncomplate_flag = True
break
else:
AfaLoggerFunc.tradeInfo("补记账成功")
#========修改错账处理标识为已处理==============================
AfaLoggerFunc.tradeInfo(">>>开始修改错账处理标识为已处理")
hpdzcz_update_dict = {}
hpdzcz_update_dict['ISDEAL'] = PL_ISDEAL_ISDO
hpdzcz_where_dict = {}
hpdzcz_where_dict['SNDBNKCO'] = hpdzcz_list[i]['SNDBNKCO']
hpdzcz_where_dict['TRCDAT'] = hpdzcz_list[i]['TRCDAT']
hpdzcz_where_dict['TRCNO'] = hpdzcz_list[i]['TRCNO']
ret = rccpsDBTrcc_hpdzcz.updateCmt(hpdzcz_update_dict,hpdzcz_where_dict)
if ret <= 0:
AfaLoggerFunc.tradeInfo("sqlErrMsg=" + AfaDBFunc.sqlErrMsg)
rccpsCronFunc.cronExit("S999","修改此错账处理标识异常")
AfaLoggerFunc.tradeInfo(">>>结束修改错账处理标识为已处理")
break
AfaLoggerFunc.tradeInfo(">>>结束处理来账行内未清算,中心清算类型")
#================关闭汇票对账汇票对账错账处理系统调度,打开汇票对账明细文件生成及发送到主机系统调度==
AfaLoggerFunc.tradeInfo(">>>开始关闭汇票对账汇票对账错账处理系统调度,打开汇票对账明细文件生成及发送到主机系统调度")
if not rccpsCronFunc.closeCron("00040"):
rccpsCronFunc.cronExit("S999","关闭汇票对账错账处理系统调度异常")
if not rccpsCronFunc.openCron("00045"):
rccpsCronFunc.cronExit("S999","打开汇票对账明细文件生成及发送到主机系统调度异常")
if not AfaDBFunc.CommitSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
rccpsCronFunc.cronExit("S999","Commit异常")
AfaLoggerFunc.tradeInfo(">>>Commit成功")
AfaLoggerFunc.tradeInfo(">>>结束关闭汇票对账汇票对账错账处理系统调度,打开汇票对账明细文件生成及发送到主机系统调度")
AfaLoggerFunc.tradeInfo("***农信银系统: 系统调度类.汇票对账错账处理[rccpsHPDZCZModify]退出***")
except Exception, e:
#所有异常
if not AfaDBFunc.RollbackSql( ):
AfaLoggerFunc.tradeInfo( AfaDBFunc.sqlErrMsg )
AfaLoggerFunc.tradeInfo(">>>Rollback异常")
AfaLoggerFunc.tradeInfo(">>>Rollback成功")
if( not TradeContext.existVariable( "errorCode" ) or str(e) ):
TradeContext.errorCode = 'A9999'
TradeContext.errorMsg = '系统错误['+ str(e) +']'
if TradeContext.errorCode != '0000' :
AfaLoggerFunc.tradeInfo( 'errorCode=['+TradeContext.errorCode+']' )
AfaLoggerFunc.tradeInfo( 'errorMsg=['+TradeContext.errorMsg+']' )
AfaLoggerFunc.tradeInfo("***[rccpsHPDZCZModify]交易中断***")
sys.exit(-1)
|
GB18030
|
Python
| false | false | 2,014 |
15,410,342,678,831 |
9415b3a66de450ff86f2406f1b81d34b6558b7bc
|
8c7c781fc7cc9c9cd5511fcb0b44203a6d1e5c53
|
/Tester.py
|
7e56eddd77f2dcff9bf502062394ffd0da6aebfb
|
[] |
no_license
|
6thfdwp/motionplanning
|
https://github.com/6thfdwp/motionplanning
|
3a3eb02bbc70c510fbc32b72a9e97830aa379150
|
cec73274f77b49d383b10e8023bde28b705c6af6
|
refs/heads/master
| 2020-06-06T13:55:13.644295 | 2013-09-03T09:51:52 | 2013-09-03T09:51:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from motionplanning.ASVPlanner import ASVPlanner
testcases = ['3ASV-t1.txt', '3ASV-variable-x4.txt', '3ASV-fixed-x4.txt', '7ASV-x4.txt']
tuningPara = [
    [(150, 0.1), (150, 0.2), (200, 0.1), (200, 0.2)],
    [(150, 0.1), (150, 0.2), (200, 0.1), (200, 0.2)],
    [(150, 0.1), (150, 0.2), (200, 0.1), (200, 0.2)],
    [(150, 0.1), (150, 0.2), (200, 0.1), (200, 0.2)],
]
for i, case in enumerate(testcases):
paras = tuningPara[i]
for para in paras: # may try different samples and radius
sampleNum, radius = para
ASVPlanner(case, sampleNum, radius)
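# --- Hedged variant (not in the original tester) ---
# When comparing (sampleNum, radius) settings it can help to time each run.
# This only defines a function; it assumes ASVPlanner blocks until the
# planning run finishes, which the loop above suggests but does not prove.
def timed_runs():
    import time
    for i, case in enumerate(testcases):
        for sampleNum, radius in tuningPara[i]:
            start = time.time()
            ASVPlanner(case, sampleNum, radius)
            print('%s n=%d r=%.1f: %.1fs' % (case, sampleNum, radius, time.time() - start))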
|
UTF-8
|
Python
| false | false | 2,013 |
4,587,025,072,512 |
36e2a4d1425f41018b79c6bd10631c6112137d5d
|
1643f3c0e0b1898e399ce36a627374b5e61a1901
|
/idipmap.py
|
bf5043a92b17e3875a19247f7d89755646c9b356
|
[] |
no_license
|
svk/idip
|
https://github.com/svk/idip
|
8bd987213b393eae38ab95570ce8ccd815a27bb6
|
5ec8a7080d5864a503ce7584c3a89ca251552026
|
refs/heads/master
| 2016-09-05T23:40:46.573333 | 2010-02-15T11:13:13 | 2010-02-15T11:13:13 | 518,524 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from idip import Board, Nation, Province
def createStandardBoard():
board = Board()
board.addNation( Nation( "France", adjective = "French" ) )
board.addNation( Nation( "England", adjective = "English" ) )
board.addNation( Nation( "Germany", adjective = "German" ) )
board.addNation( Nation( "Russia", adjective = "Russian" ) )
board.addNation( Nation( "Austria", adjective = "Austrian" ) )
board.addNation( Nation( "Turkey", adjective = "Turkish" ) )
board.addNation( Nation( "Italy", adjective = "Italian" ) )
provinces = [
# Africa
Province( "Naf", "North Africa" ).addCoast(),
Province( "Tun", "Tunis" ).makeSupply().addCoast(),
# Austria
Province( "Boh", "Bohemia" ),
Province( "Bud", "Budapest" ).makeSupply(),
Province( "Gal", "Galicia" ),
Province( "Tri", "Trieste" ).makeSupply().addCoast(),
Province( "Tyr", "Tyrolia" ),
Province( "Vie", "Vienna" ).makeSupply(),
# Balkans
Province( "Bul", "Bulgaria" ).addCoast("EC").addCoast("SC").makeSupply(),
Province( "Gre", "Greece" ).addCoast().makeSupply(),
Province( "Rum", "Rumania" ).addCoast().makeSupply(),
Province( "Ser", "Serbia" ).makeSupply(),
Province( "Alb", "Albania" ).addCoast(),
# England
Province( "Cly", "Clyde" ).addCoast(),
Province( "Edi", "Edinburgh" ).addCoast().makeSupply(),
Province( "Lvp", "Liverpool" ).addCoast().makeSupply(),
Province( "Lon", "London" ).addCoast().makeSupply(),
Province( "Wal", "Wales" ).addCoast(),
Province( "Yor", "Yorkshire" ).addCoast(),
# France
Province( "Bre", "Brest" ).addCoast().makeSupply(),
Province( "Bur", "Burgundy" ),
Province( "Gas", "Gascony" ).addCoast(),
Province( "Mar", "Marseilles" ).addCoast().makeSupply(),
Province( "Par", "Paris" ).makeSupply(),
Province( "Pic", "Picardy" ).addCoast(),
# Germany
Province( "Ber", "Berlin" ).addCoast().makeSupply(),
Province( "Kie", "Kiel" ).addCoast().makeSupply(),
Province( "Mun", "Munich" ).makeSupply(),
Province( "Pru", "Prussia" ).addCoast(),
Province( "Ruh", "Ruhr" ),
Province( "Sil", "Silesia" ),
# Iberia
Province( "Spa", "Spain" ).addCoast("NC").addCoast("SC").makeSupply(),
Province( "Por", "Portugal" ).addCoast().makeSupply(),
# Italy
Province( "Apu", "Apulia" ).addCoast(),
Province( "Nap", "Naples" ).addCoast().makeSupply(),
Province( "Pie", "Piedmont" ).addCoast(),
Province( "Rom", "Rome" ).addCoast().makeSupply(),
Province( "Tus", "Tuscany" ).addCoast(),
Province( "Ven", "Venice" ).addCoast().makeSupply(),
# Low Countries
Province( "Bel", "Belgium" ).addCoast().makeSupply(),
Province( "Hol", "Holland" ).addCoast().makeSupply(),
# Russia
Province( "Fin", "Finland" ).addCoast(),
Province( "Lvn", "Livonia" ).addCoast(),
Province( "Mos", "Moscow" ).makeSupply(),
Province( "Sev", "Sevastopol" ).addCoast().makeSupply(),
Province( "Stp", "St. Petersburg" ).addCoast("NC").addCoast("SC").makeSupply(),
Province( "Ukr", "Ukraine" ),
Province( "War", "Warsaw" ).makeSupply(),
        # Scandinavia
Province( "Den", "Denmark" ).addCoast().makeSupply(),
Province( "Nwy", "Norway" ).addCoast().makeSupply(),
Province( "Swe", "Sweden" ).addCoast().makeSupply(),
# Turkey
Province( "Con", "Constantinople").addCoast().makeSupply(),
Province( "Ank", "Ankara").addCoast().makeSupply(),
Province( "Smy", "Smyrna").addCoast().makeSupply(),
Province( "Arm", "Armenia" ).addCoast(),
Province( "Syr", "Syria" ).addCoast(),
# Atlantic
Province( "ENG", "English Channel" ),
Province( "HEL", "Helgoland Blight" ),
Province( "IRI", "Irish Sea" ),
Province( "MAO", "Mid-Atlantic Ocean" ),
Province( "NAO", "North-Atlantic Ocean" ),
Province( "NTH", "North Sea" ),
Province( "SKA", "Skagerrak" ),
Province( "NWG", "Norwegian Sea" ),
Province( "BAR", "Barents Sea" ),
# Baltic
Province( "BAL", "Baltic Sea" ),
Province( "BOT", "Gulf of Bothnia" ),
# Mediterranean
Province( "BLA", "Black Sea" ),
Province( "AEG", "Aegean Sea" ),
Province( "ADR", "Adriatic Sea" ),
Province( "EAS", "Eastern Mediterranean Sea" ),
Province( "LYO", "Gulf of Lyons" ),
Province( "ION", "Ionian Sea" ),
Province( "TYS", "Tyrrhenian Sea" ),
Province( "WES", "Western Mediterranean Sea" ),
]
for province in provinces:
board.addProvince( province )
board.provinces.Lvp.setUnit( board.nations.England, "army" )
board.provinces.Edi.coast().setUnit( board.nations.England, "fleet" )
board.provinces.Lon.coast().setUnit( board.nations.England, "fleet" )
board.nations.England.addHome( board.provinces.Lvp )
board.nations.England.addHome( board.provinces.Edi )
board.nations.England.addHome( board.provinces.Lon )
board.provinces.Par.setUnit( board.nations.France, "army" )
board.provinces.Mar.setUnit( board.nations.France, "army" )
board.provinces.Bre.coast().setUnit( board.nations.France, "fleet" )
board.nations.France.addHome( board.provinces.Par )
board.nations.France.addHome( board.provinces.Mar )
board.nations.France.addHome( board.provinces.Bre )
board.provinces.Mun.setUnit( board.nations.Germany, "army" )
board.provinces.Ber.setUnit( board.nations.Germany, "army" )
board.provinces.Kie.coast().setUnit( board.nations.Germany, "fleet" )
board.nations.Germany.addHome( board.provinces.Mun )
board.nations.Germany.addHome( board.provinces.Ber )
board.nations.Germany.addHome( board.provinces.Kie )
board.provinces.Ven.setUnit( board.nations.Italy, "army" )
board.provinces.Rom.setUnit( board.nations.Italy, "army" )
board.provinces.Nap.coast().setUnit( board.nations.Italy, "fleet" )
board.nations.Italy.addHome( board.provinces.Ven )
board.nations.Italy.addHome( board.provinces.Rom )
board.nations.Italy.addHome( board.provinces.Nap )
board.provinces.Vie.setUnit( board.nations.Austria, "army" )
board.provinces.Bud.setUnit( board.nations.Austria, "army" )
board.provinces.Tri.coast().setUnit( board.nations.Austria, "fleet" )
board.nations.Austria.addHome( board.provinces.Vie )
board.nations.Austria.addHome( board.provinces.Bud )
board.nations.Austria.addHome( board.provinces.Tri )
board.provinces.Con.setUnit( board.nations.Turkey, "army" )
board.provinces.Smy.setUnit( board.nations.Turkey, "army" )
board.provinces.Ank.coast().setUnit( board.nations.Turkey, "fleet" )
board.nations.Turkey.addHome( board.provinces.Con )
board.nations.Turkey.addHome( board.provinces.Smy )
board.nations.Turkey.addHome( board.provinces.Ank )
board.provinces.War.setUnit( board.nations.Russia, "army" )
board.provinces.Mos.setUnit( board.nations.Russia, "army" )
board.provinces.Sev.coast().setUnit( board.nations.Russia, "fleet" )
board.provinces.Stp.coast("SC").setUnit( board.nations.Russia, "fleet" )
board.nations.Russia.addHome( board.provinces.War )
board.nations.Russia.addHome( board.provinces.Mos )
board.nations.Russia.addHome( board.provinces.Sev )
board.nations.Russia.addHome( board.provinces.Stp )
# Turkey
board.provinces.Ank.coast().link( board.provinces.Con.coast() )
board.provinces.Con.coast().link( board.provinces.Smy.coast() )
board.provinces.Ank.coast().link( board.provinces.BLA )
board.provinces.Con.coast().link( board.provinces.BLA )
board.provinces.Con.coast().link( board.provinces.AEG )
board.provinces.Smy.coast().link( board.provinces.AEG )
board.provinces.Smy.coast().link( board.provinces.EAS )
board.provinces.Con.link( board.provinces.Ank )
board.provinces.Con.link( board.provinces.Smy )
board.provinces.Smy.link( board.provinces.Ank )
board.provinces.Ank.link( board.provinces.Arm )
board.provinces.Arm.link( board.provinces.Smy )
board.provinces.Smy.link( board.provinces.Syr )
board.provinces.Syr.link( board.provinces.Arm )
board.provinces.Con.link( board.provinces.Bul )
# Russia
board.provinces.Stp.coast("NC").link( board.provinces.BAR )
board.provinces.Stp.coast("SC").link( board.provinces.BOT )
board.provinces.Lvn.coast().link( board.provinces.BOT )
board.provinces.Lvn.coast().link( board.provinces.BAL )
board.provinces.Sev.coast().link( board.provinces.BLA )
board.provinces.Lvn.link( board.provinces.War )
board.provinces.Lvn.link( board.provinces.Mos )
board.provinces.Lvn.link( board.provinces.Stp )
board.provinces.War.link( board.provinces.Ukr )
board.provinces.War.link( board.provinces.Mos )
board.provinces.Ukr.link( board.provinces.War )
board.provinces.Ukr.link( board.provinces.Mos )
board.provinces.Ukr.link( board.provinces.Sev )
board.provinces.Sev.link( board.provinces.Mos )
board.provinces.Sev.link( board.provinces.Ukr )
board.provinces.Sev.link( board.provinces.Arm )
# Coasts
board.provinces.Stp.coast("NC").link( board.provinces.BAR )
board.provinces.Stp.coast("NC").link( board.provinces.Nwy.coast() )
board.provinces.Nwy.coast().link( board.provinces.BAR )
board.provinces.Nwy.coast().link( board.provinces.NWG )
board.provinces.Nwy.coast().link( board.provinces.NTH )
board.provinces.Nwy.coast().link( board.provinces.SKA )
board.provinces.Nwy.coast().link( board.provinces.Swe.coast() )
board.provinces.Swe.coast().link( board.provinces.SKA )
board.provinces.Swe.coast().link( board.provinces.Den.coast() )
board.provinces.Swe.coast().link( board.provinces.BAL )
board.provinces.Swe.coast().link( board.provinces.BOT )
board.provinces.Swe.coast().link( board.provinces.Fin.coast() )
board.provinces.Fin.coast().link( board.provinces.BOT )
board.provinces.Fin.coast().link( board.provinces.Stp.coast("SC") )
board.provinces.Stp.coast("SC").link( board.provinces.BOT )
board.provinces.Stp.coast("SC").link( board.provinces.Lvn.coast() )
board.provinces.Lvn.coast().link( board.provinces.BOT )
board.provinces.Lvn.coast().link( board.provinces.BAL )
board.provinces.Lvn.coast().link( board.provinces.Pru.coast() )
board.provinces.Pru.coast().link( board.provinces.BAL )
board.provinces.Pru.coast().link( board.provinces.Ber.coast() )
board.provinces.Ber.coast().link( board.provinces.BAL )
board.provinces.Ber.coast().link( board.provinces.Kie.coast() )
board.provinces.Kie.coast().link( board.provinces.BAL )
board.provinces.Kie.coast().link( board.provinces.HEL )
board.provinces.Kie.coast().link( board.provinces.Den.coast() )
board.provinces.Den.coast().link( board.provinces.BAL )
board.provinces.Den.coast().link( board.provinces.SKA )
board.provinces.Den.coast().link( board.provinces.NTH )
board.provinces.Den.coast().link( board.provinces.HEL )
board.provinces.Den.coast().link( board.provinces.Kie.coast() )
board.provinces.Kie.coast().link( board.provinces.Hol.coast() )
board.provinces.Hol.coast().link( board.provinces.NTH )
board.provinces.Hol.coast().link( board.provinces.HEL )
board.provinces.Hol.coast().link( board.provinces.Bel.coast() )
board.provinces.Bel.coast().link( board.provinces.NTH )
board.provinces.Bel.coast().link( board.provinces.ENG )
board.provinces.Bel.coast().link( board.provinces.Pic.coast() )
board.provinces.Pic.coast().link( board.provinces.ENG )
board.provinces.Pic.coast().link( board.provinces.Bre.coast() )
board.provinces.Bre.coast().link( board.provinces.ENG )
board.provinces.Bre.coast().link( board.provinces.MAO )
board.provinces.Bre.coast().link( board.provinces.Gas.coast() )
board.provinces.Gas.coast().link( board.provinces.MAO )
board.provinces.Gas.coast().link( board.provinces.Spa.coast("NC") )
board.provinces.Spa.coast("NC").link( board.provinces.MAO )
board.provinces.Spa.coast("NC").link( board.provinces.Por )
board.provinces.Por.coast().link( board.provinces.MAO )
board.provinces.Por.coast().link( board.provinces.Spa.coast("SC") )
board.provinces.Spa.coast("SC").link( board.provinces.MAO )
board.provinces.Spa.coast("SC").link( board.provinces.WES )
board.provinces.Spa.coast("SC").link( board.provinces.LYO )
board.provinces.Spa.coast("SC").link( board.provinces.Mar.coast() )
board.provinces.Mar.coast().link( board.provinces.LYO )
board.provinces.Mar.coast().link( board.provinces.Pie.coast() )
board.provinces.Pie.coast().link( board.provinces.LYO )
board.provinces.Pie.coast().link( board.provinces.Tus.coast() )
board.provinces.Tus.coast().link( board.provinces.LYO )
board.provinces.Tus.coast().link( board.provinces.TYS )
board.provinces.Tus.coast().link( board.provinces.Rom.coast() )
board.provinces.Rom.coast().link( board.provinces.TYS )
board.provinces.Rom.coast().link( board.provinces.Nap.coast() )
board.provinces.Nap.coast().link( board.provinces.TYS )
board.provinces.Nap.coast().link( board.provinces.ION )
board.provinces.Nap.coast().link( board.provinces.Apu.coast() )
board.provinces.Apu.coast().link( board.provinces.ION )
board.provinces.Apu.coast().link( board.provinces.ADR )
board.provinces.Apu.coast().link( board.provinces.Ven.coast() )
board.provinces.Ven.coast().link( board.provinces.ADR )
board.provinces.Ven.coast().link( board.provinces.Tri.coast() )
board.provinces.Tri.coast().link( board.provinces.ADR )
board.provinces.Tri.coast().link( board.provinces.Alb.coast() )
board.provinces.Alb.coast().link( board.provinces.ADR )
board.provinces.Alb.coast().link( board.provinces.ION )
board.provinces.Alb.coast().link( board.provinces.Gre.coast() )
board.provinces.Gre.coast().link( board.provinces.ION )
board.provinces.Gre.coast().link( board.provinces.AEG )
board.provinces.Gre.coast().link( board.provinces.Bul.coast("SC") )
board.provinces.Bul.coast("SC").link( board.provinces.AEG )
board.provinces.Bul.coast("SC").link( board.provinces.Con.coast() )
board.provinces.Con.coast().link( board.provinces.AEG )
board.provinces.Con.coast().link( board.provinces.Bul.coast("EC") )
board.provinces.Con.coast().link( board.provinces.BLA )
board.provinces.Bul.coast("EC").link( board.provinces.BLA )
board.provinces.Bul.coast("EC").link( board.provinces.Rum.coast() )
board.provinces.Rum.coast().link( board.provinces.BLA )
board.provinces.Rum.coast().link( board.provinces.Sev.coast() )
board.provinces.Sev.coast().link( board.provinces.BLA )
board.provinces.Sev.coast().link( board.provinces.Arm.coast() )
board.provinces.Arm.coast().link( board.provinces.BLA )
board.provinces.Arm.coast().link( board.provinces.Ank.coast() )
board.provinces.Ank.coast().link( board.provinces.BLA )
board.provinces.Ank.coast().link( board.provinces.Con.coast() )
board.provinces.Con.coast().link( board.provinces.Smy.coast() )
board.provinces.Smy.coast().link( board.provinces.AEG )
board.provinces.Smy.coast().link( board.provinces.EAS )
board.provinces.Smy.coast().link( board.provinces.Syr.coast() )
board.provinces.Syr.coast().link( board.provinces.EAS )
board.provinces.Naf.coast().link( board.provinces.MAO )
board.provinces.Naf.coast().link( board.provinces.WES )
board.provinces.Naf.coast().link( board.provinces.Tun.coast() )
board.provinces.Tun.coast().link( board.provinces.WES )
board.provinces.Tun.coast().link( board.provinces.TYS )
board.provinces.Tun.coast().link( board.provinces.ION )
board.provinces.Cly.coast().link( board.provinces.NWG )
board.provinces.Cly.coast().link( board.provinces.NAO )
board.provinces.Cly.coast().link( board.provinces.Lvp.coast() )
board.provinces.Lvp.coast().link( board.provinces.NAO )
board.provinces.Lvp.coast().link( board.provinces.IRI )
board.provinces.Lvp.coast().link( board.provinces.Wal.coast() )
board.provinces.Wal.coast().link( board.provinces.IRI )
board.provinces.Wal.coast().link( board.provinces.ENG )
board.provinces.Wal.coast().link( board.provinces.Lon.coast() )
board.provinces.Lon.coast().link( board.provinces.ENG )
board.provinces.Lon.coast().link( board.provinces.NTH )
board.provinces.Lon.coast().link( board.provinces.Yor.coast() )
board.provinces.Yor.coast().link( board.provinces.NTH )
board.provinces.Yor.coast().link( board.provinces.Edi.coast() )
board.provinces.Edi.coast().link( board.provinces.NTH )
board.provinces.Edi.coast().link( board.provinces.NWG )
board.provinces.Edi.coast().link( board.provinces.Cly.coast() )
# Ocean links
board.provinces.BAR.linkMultiple( [ board.provinces.NWG ] )
board.provinces.NWG.linkMultiple( [ board.provinces.BAR,
board.provinces.NAO,
board.provinces.NTH ] )
board.provinces.NAO.linkMultiple( [ board.provinces.NWG,
board.provinces.IRI,
board.provinces.MAO ] )
board.provinces.NTH.linkMultiple( [ board.provinces.NWG,
board.provinces.SKA,
board.provinces.HEL,
board.provinces.ENG ] )
board.provinces.SKA.linkMultiple( [ board.provinces.NTH ] )
board.provinces.BAL.linkMultiple( [ board.provinces.BOT ] )
board.provinces.BOT.linkMultiple( [ board.provinces.BAL ] )
board.provinces.HEL.linkMultiple( [ board.provinces.NTH ] )
board.provinces.IRI.linkMultiple( [ board.provinces.NAO,
board.provinces.MAO,
board.provinces.ENG ] )
board.provinces.MAO.linkMultiple( [ board.provinces.NAO,
board.provinces.IRI,
board.provinces.ENG,
board.provinces.WES ] )
board.provinces.WES.linkMultiple( [ board.provinces.MAO,
board.provinces.LYO,
board.provinces.TYS ] )
board.provinces.LYO.linkMultiple( [ board.provinces.WES,
board.provinces.TYS ] )
board.provinces.TYS.linkMultiple( [ board.provinces.WES,
board.provinces.LYO,
board.provinces.ION ] )
board.provinces.ION.linkMultiple( [ board.provinces.TYS,
board.provinces.ADR,
board.provinces.AEG,
board.provinces.EAS ] )
board.provinces.ADR.linkMultiple( [ board.provinces.ION ] )
board.provinces.AEG.linkMultiple( [ board.provinces.ION,
board.provinces.EAS ] )
board.provinces.EAS.linkMultiple( [ board.provinces.ION,
board.provinces.AEG ] )
board.provinces.BLA.linkMultiple( [] )
board.provinces.Nwy.linkMultiple( [
board.provinces.Swe,
board.provinces.Fin,
board.provinces.Stp,
] )
board.provinces.Swe.linkMultiple( [
board.provinces.Nwy,
board.provinces.Den,
board.provinces.Fin,
] )
board.provinces.Fin.linkMultiple( [
board.provinces.Nwy,
board.provinces.Swe,
board.provinces.Stp,
] )
board.provinces.Stp.linkMultiple( [
board.provinces.Nwy,
board.provinces.Fin,
board.provinces.Lvn,
board.provinces.Mos,
] )
board.provinces.Lvn.linkMultiple( [
board.provinces.Pru,
board.provinces.War,
board.provinces.Mos,
board.provinces.Stp,
] )
board.provinces.Mos.linkMultiple( [
board.provinces.Stp,
board.provinces.Lvn,
board.provinces.War,
board.provinces.Ukr,
board.provinces.Sev,
] )
board.provinces.Sev.linkMultiple( [
board.provinces.Mos,
board.provinces.Ukr,
board.provinces.Rum,
board.provinces.Arm,
] )
board.provinces.Arm.linkMultiple( [
board.provinces.Sev,
board.provinces.Ank,
board.provinces.Smy,
board.provinces.Syr,
] )
board.provinces.Ank.linkMultiple( [
board.provinces.Con,
board.provinces.Smy,
board.provinces.Arm,
] )
board.provinces.Smy.linkMultiple( [
board.provinces.Con,
board.provinces.Ank,
board.provinces.Arm,
board.provinces.Syr,
] )
board.provinces.Syr.linkMultiple( [
board.provinces.Arm,
board.provinces.Smy,
] )
board.provinces.Con.linkMultiple( [
board.provinces.Bul,
board.provinces.Smy,
board.provinces.Ank,
] )
board.provinces.Bul.linkMultiple( [
board.provinces.Con,
board.provinces.Gre,
board.provinces.Ser,
board.provinces.Rum,
] )
board.provinces.Rum.linkMultiple( [
board.provinces.Bul,
board.provinces.Ser,
board.provinces.Bud,
board.provinces.Gal,
board.provinces.Ukr,
board.provinces.Sev,
] )
board.provinces.Ukr.linkMultiple( [
board.provinces.War,
board.provinces.Mos,
board.provinces.Sev,
board.provinces.Rum,
board.provinces.Gal,
] )
board.provinces.Pru.linkMultiple( [
board.provinces.Ber,
board.provinces.Sil,
board.provinces.War,
board.provinces.Lvn,
] )
board.provinces.War.linkMultiple( [
board.provinces.Pru,
board.provinces.Sil,
board.provinces.Gal,
board.provinces.Ukr,
board.provinces.Mos,
board.provinces.Lvn,
] )
board.provinces.Gal.linkMultiple( [
board.provinces.Sil,
board.provinces.Boh,
board.provinces.Vie,
board.provinces.Bud,
board.provinces.Rum,
board.provinces.Ukr,
board.provinces.War,
] )
board.provinces.Bud.linkMultiple( [
board.provinces.Gal,
board.provinces.Vie,
board.provinces.Tri,
board.provinces.Ser,
board.provinces.Rum,
] )
board.provinces.Ser.linkMultiple( [
board.provinces.Bud,
board.provinces.Tri,
board.provinces.Alb,
board.provinces.Gre,
board.provinces.Bul,
board.provinces.Rum,
] )
board.provinces.Alb.linkMultiple( [
board.provinces.Tri,
board.provinces.Ser,
board.provinces.Gre,
] )
board.provinces.Gre.linkMultiple( [
board.provinces.Alb,
board.provinces.Ser,
board.provinces.Bul,
] )
board.provinces.Sil.linkMultiple( [
board.provinces.Pru,
board.provinces.Ber,
board.provinces.Mun,
board.provinces.Boh,
board.provinces.Gal,
board.provinces.War,
] )
board.provinces.Boh.linkMultiple( [
board.provinces.Sil,
board.provinces.Mun,
board.provinces.Tyr,
board.provinces.Vie,
board.provinces.Gal,
] )
board.provinces.Vie.linkMultiple( [
board.provinces.Boh,
board.provinces.Tyr,
board.provinces.Tri,
board.provinces.Bud,
board.provinces.Gal,
] )
board.provinces.Tri.linkMultiple( [
board.provinces.Alb,
board.provinces.Ser,
board.provinces.Bud,
board.provinces.Vie,
board.provinces.Tyr,
board.provinces.Ven,
] )
board.provinces.Ber.linkMultiple( [
board.provinces.Kie,
board.provinces.Mun,
board.provinces.Sil,
board.provinces.Pru,
] )
board.provinces.Kie.linkMultiple( [
board.provinces.Ber,
board.provinces.Mun,
board.provinces.Ruh,
board.provinces.Hol,
board.provinces.Den,
] )
board.provinces.Den.linkMultiple( [
board.provinces.Kie,
board.provinces.Swe,
] )
board.provinces.Hol.linkMultiple( [
board.provinces.Bel,
board.provinces.Ruh,
board.provinces.Kie,
] )
board.provinces.Ruh.linkMultiple( [
board.provinces.Bel,
board.provinces.Hol,
board.provinces.Kie,
board.provinces.Mun,
board.provinces.Bur,
] )
board.provinces.Bel.linkMultiple( [
board.provinces.Pic,
board.provinces.Bur,
board.provinces.Ruh,
board.provinces.Hol,
] )
board.provinces.Tyr.linkMultiple( [
board.provinces.Boh,
board.provinces.Mun,
board.provinces.Vie,
board.provinces.Tri,
board.provinces.Ven,
board.provinces.Pie,
] )
board.provinces.Pie.linkMultiple( [
board.provinces.Mar,
board.provinces.Tyr,
board.provinces.Ven,
board.provinces.Tus,
] )
board.provinces.Tus.linkMultiple( [
board.provinces.Pie,
board.provinces.Ven,
board.provinces.Rom,
] )
board.provinces.Ven.linkMultiple( [
board.provinces.Tyr,
board.provinces.Tri,
board.provinces.Pie,
board.provinces.Tus,
board.provinces.Rom,
board.provinces.Apu,
] )
board.provinces.Rom.linkMultiple( [
board.provinces.Tus,
board.provinces.Ven,
board.provinces.Apu,
board.provinces.Nap,
] )
board.provinces.Apu.linkMultiple( [
board.provinces.Ven,
board.provinces.Rom,
board.provinces.Nap,
] )
board.provinces.Nap.linkMultiple( [
board.provinces.Rom,
board.provinces.Apu,
] )
board.provinces.Pic.linkMultiple( [
board.provinces.Bel,
board.provinces.Bur,
board.provinces.Par,
board.provinces.Bre,
] )
board.provinces.Bur.linkMultiple( [
board.provinces.Pic,
board.provinces.Par,
board.provinces.Gas,
board.provinces.Mar,
board.provinces.Mun,
board.provinces.Ruh,
board.provinces.Bel,
] )
board.provinces.Mar.linkMultiple( [
board.provinces.Spa,
board.provinces.Gas,
board.provinces.Bur,
board.provinces.Pie,
] )
board.provinces.Spa.linkMultiple( [
board.provinces.Por,
board.provinces.Gas,
board.provinces.Mar,
] )
board.provinces.Por.linkMultiple( [
board.provinces.Spa,
] )
board.provinces.Gas.linkMultiple( [
board.provinces.Spa,
board.provinces.Mar,
board.provinces.Bur,
board.provinces.Par,
board.provinces.Bre,
] )
board.provinces.Bre.linkMultiple( [
board.provinces.Pic,
board.provinces.Par,
board.provinces.Gas,
] )
board.provinces.Naf.linkMultiple( [
board.provinces.Tun,
] )
board.provinces.Tun.linkMultiple( [
board.provinces.Naf,
] )
board.provinces.Wal.linkMultiple( [
board.provinces.Lon,
board.provinces.Yor,
board.provinces.Lvp,
] )
board.provinces.Lvp.linkMultiple( [
board.provinces.Wal,
board.provinces.Yor,
board.provinces.Edi,
board.provinces.Cly,
] )
board.provinces.Lon.linkMultiple( [
board.provinces.Wal,
board.provinces.Yor,
] )
board.provinces.Yor.linkMultiple( [
board.provinces.Lon,
board.provinces.Wal,
board.provinces.Lvp,
board.provinces.Edi,
] )
board.provinces.Edi.linkMultiple( [
board.provinces.Cly,
board.provinces.Lvp,
board.provinces.Yor,
] )
board.provinces.Cly.linkMultiple( [
board.provinces.Edi,
board.provinces.Lvp,
] )
return board
if __name__ == '__main__':
board = createStandardBoard()
for line in board.exportState():
print( line )
print()
nblist = []
for province in board.provinces.values():
nblist.append( (len( province.neighbours()), province.displayName ) )
nblist.sort()
for n, name in nblist:
print( name, n )
|
UTF-8
|
Python
| false | false | 2,010 |
6,365,141,558,523 |
a61d7dd3832dc7281878514f2ac023793e249cfa
|
c8ced70b90acba51322a7e484cbe7dec01b3f949
|
/searchModMail.py
|
d524632dc5773004f5838207a203953a73f31a07
|
[] |
no_license
|
boibboib/searchModMail
|
https://github.com/boibboib/searchModMail
|
b9bc0c6cd76487917da7a11fd1cc6aaa5a2b2027
|
264b5eedb6983dd9af0c94f98d252578ac38de0e
|
refs/heads/master
| 2020-05-18T02:19:41.212049 | 2014-11-26T19:40:10 | 2014-11-26T19:40:10 | 26,965,934 | 0 | 1 | null | false | 2015-02-05T21:25:26 | 2014-11-21T15:43:54 | 2014-11-26T19:40:11 | 2014-11-26T19:40:10 | 120 | 0 | 1 | 1 |
Python
| null | null |
import praw
import re
import sys
def doReplies (replies, searchStr):
for reply in replies:
#if reply.author:
# if reply.author.name == "drocklin":
# print(reply.body)
#
#if "2i5uxj" in reply.parent_id:
# print (reply.body)
if re.search(searchStr, reply.body, re.I):
print ("https://www.reddit.com/message/messages/" + reply.parent_id[3:])
return True
if reply.replies:
if doReplies(reply.replies, searchStr):
return True
return False
if __name__=='__main__':
#
# init and log into reddit
#
SUBREDDIT = "books"
print("==============================")
r = praw.Reddit(user_agent="/u/boib readModMail")
# so that reddit wont translate '>' into '>'
r.config.decode_html_entities = True
r.login("password", "username")
print("==============================")
if len(sys.argv) > 1:
searchStr = sys.argv[1]
else:
print ("enter search term on cmd line")
sys.exit(1)
# try:
if True:
inbox = r.get_mod_mail(subreddit=SUBREDDIT, limit=1000)
if not inbox:
print ("no messages")
else:
for inboxMsg in inbox:
if re.search(searchStr, inboxMsg.body, re.I):
print ("https://www.reddit.com/message/messages/" + inboxMsg.fullname[3:])
continue
if inboxMsg.replies:
doReplies(inboxMsg.replies, searchStr)
# except Exception as e:
# print('An error has occured: %s ' % (e))
|
UTF-8
|
Python
| false | false | 2,014 |
17,695,265,274,759 |
66ebcba23f09841f16411bf9fed034774a5f0c90
|
10cb51c0e5089cf9c940fbcadd03222b01411d46
|
/renux/ImageIndex.py
|
f92e568b8fa4f57c8c0840d0605f5d447e82d823
|
[] |
no_license
|
faux/renux
|
https://github.com/faux/renux
|
090e03dcd15af288a3955edd65a3b216ce7b9a79
|
5c4905e50c275a7c53105a2961dda2628ae53ad8
|
refs/heads/master
| 2020-06-04T22:05:45.630932 | 2010-04-30T01:08:43 | 2010-04-30T02:55:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import base64
import re
re_url_safe = re.compile("[^A-Za-z0-9_]")
re_drop_ext = re.compile("\.\w+$")
separator = "_ANY_STRING_WILL_DO_AS_A_SEPARATOR"
encoded_doc_template = """/*
Content-Type: multipart/related; boundary="%s"
*/
%%(css_items)s
""" % separator
mhtml_item_template = """--%s
Content-Location:%%(safe_name)s
Content-Transfer-Encoding:base64
%%(b64)s""" % separator
css_item_template = """.%(safe_name)s {/*
%(mhtml)s
*/background-image: url("data:%(mime)s;base64,%(b64)s");*background-image: url(mhtml:%%(url_path)s!%(safe_name)s);%(custom_css)s}"""
def get_image_size(image):
path = image['path']
try:
from javax.imageio import ImageIO
from java.io import File
img = ImageIO.read(File(path))
width = img.getWidth()
height = img.getHeight()
except ImportError:
from PIL import Image as PILImage
img = PILImage.open(path)
width, height = img.size
image['custom_css'] += "height:%(height)spx;width:%(width)spx;" % {'width': width, 'height': height}
def get_safe_name(image):
return re_url_safe.sub("_", re_drop_ext.sub("", image['filename']))
class Image(dict):
def __init__(self, **kwargs):
super(Image, self).__init__(**kwargs)
self['safe_name'] = get_safe_name(self)
self['custom_css'] = ''
self.encoded = False
def encode(self, custom_method=None):
if not self.encoded:
img = open(self['path'], "rb")
self['b64'] = base64.b64encode(img.read())
img.close()
if custom_method != None:
custom_method(self)
self['mhtml'] = self.mhtml()
self.encoded = True
def mhtml(self):
return mhtml_item_template % self
def css(self):
return css_item_template % self
class new(object):
'''
'''
# ( ext, mimetype )
image_formats = [
('jpg', 'image/jpg'),
('png', 'image/png')
]
encode_methods = []
def __init__(self):
'''
'''
self.images = []
def addpath(self, path):
for filename in os.listdir(path):
for ext, mime in self.image_formats:
if filename.endswith(ext):
img_path = path + os.sep + filename
img = Image(path=img_path, mime=mime, filename=filename)
self.images.append(img)
def add_encode_method(self, method):
self.encode_methods.append(method)
def encode(self, url_path):
def __custom_encode(image):
for m in self.encode_methods:
m(image)
for image in self.images:
image.encode(custom_method=__custom_encode)
return (encoded_doc_template % {
'css_items': '\n'.join(image.css() for image in self.images),
}) % {'url_path': url_path, }
def fix_newline(string):
import StringIO
out_string = StringIO.StringIO()
for i in xrange(len(string)):
if string[i] == '\n':
out_string.write('\r\n')
else:
out_string.write(string[i])
return out_string.getvalue()
def save_imageindex(img_index, filename, url_path):
encoded_imgs = img_index.encode(url_path)
encoded_imgs = fix_newline(encoded_imgs)
file = open(filename, "wb")
file.write(encoded_imgs)
file.close()
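# Minimal usage sketch (illustrative; the "./icons" directory and output
# names are assumptions, not part of the original module):
# index = new()
# index.addpath("./icons") # scan for .jpg/.png files
# index.add_encode_method(get_image_size) # inline width/height CSS per image
# save_imageindex(index, "images.css", "http://example.com/images.css")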
|
UTF-8
|
Python
| false | false | 2,010 |
18,270,790,887,879 |
ecfe576df864eb312ffa77511f3bf659ce8ec0b0
|
874e9de25150cdc3013705174e2735cb82a41cfb
|
/scrapyd/pubsub/callbacks.py
|
3639aa94152da8db06a285193f6a0d701b1db89f
|
[
"BSD-3-Clause"
] |
permissive
|
drankinn/scrapyd
|
https://github.com/drankinn/scrapyd
|
1b49fc17d52e0162e7388123194dc1d9d0668720
|
5ed7313b327e06237947e3a1c8a75389d273989b
|
refs/heads/master
| 2021-01-18T06:13:30.463508 | 2014-12-05T08:07:10 | 2014-12-05T08:07:10 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import pprint
import uuid
from scrapyd.interfaces import ISpiderScheduler, IPubSub
__author__ = 'drankinn'
from twisted.python import log
class PubSubCallable:
system = 'LogCallable'
def __init__(self, app, message):
self.app = app
self.message = message
def __call__(self):
log.msg(format="%(msg)s", msg=self.message, system='PubSub:'+self.system)
class LogScheduler(PubSubCallable):
system = 'Scheduler'
class LogLauncher(PubSubCallable):
system = 'LogLauncher'
class ScrapyScheduler(PubSubCallable):
system = 'ScrapyScheduler'
json_decoder = json.JSONDecoder()
json_encoder = json.JSONEncoder()
@property
def scheduler(self):
return self.app.getComponent(ISpiderScheduler)
@property
def pubsub(self):
return self.app.getComponent(IPubSub)
def __call__(self):
print self.message
try:
args = self.json_decoder.decode(self.message)
project = args.pop('project')
spider = args.pop('spider')
if args.get('_job') is None:
args['_job'] = uuid.uuid1().hex
self.scheduler.schedule(project, spider, **args)
except ValueError:
pass
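# Sketch of the JSON payload ScrapyScheduler expects on the pub/sub channel
# (field set inferred from the decode logic above; values are illustrative):
# {"project": "myproject", "spider": "myspider", "_job": null}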
|
UTF-8
|
Python
| false | false | 2,014 |
7,224,135,024,099 |
10187316dc42e5777371364944c9caf8f9435a5f
|
48c1f32e581a5989385739e48d268caec431fc26
|
/setup.py
|
16ba3c578da8ef70560b7bd2efc4c9be40f0a85e
|
[] |
no_license
|
bigsoftcms/fusefs-cloudstorage
|
https://github.com/bigsoftcms/fusefs-cloudstorage
|
9b5b44ec8d86dd42a9abe35e303f3e7762324dda
|
4bb7e9285f7e2ba79da7eef167abc83c287b2476
|
refs/heads/master
| 2021-05-26T12:07:45.083239 | 2011-07-14T13:15:01 | 2011-07-14T13:15:01 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(name='fusefs-cloudstorage',
version='0.1',
description='FUSE-based filesystem for accessing cloud storage such as Rackspace CloudFiles and Amazon S3',
author='Roman Bogorodskiy',
author_email='bogorodskiy@gmail.com',
url='https://github.com/novel/fusefs-cloudstorage',
scripts=['cloudstorage.py',],
)
|
UTF-8
|
Python
| false | false | 2,011 |
11,656,541,263,539 |
a42948ff38b5cc5a070ca4fab87868b6bf34b1ea
|
fce23fa0204cbea8e8aa059d376d7376a7493dad
|
/compiler/p2flattener.py
|
abd128a28bec6b07ca3e388eacc2c1d337fb7c85
|
[] |
no_license
|
smitten0000/ecen5523
|
https://github.com/smitten0000/ecen5523
|
0eae0a4c51f7f1fd94ba1d669f3d8ea82498dcfc
|
6b91f4ccace886353aa9c2c98f7e06ab0ad40c11
|
refs/heads/master
| 2021-03-12T20:21:15.413537 | 2011-12-15T03:53:28 | 2011-12-15T03:53:28 | 6,892,093 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# vim: set ts=4 sw=4 expandtab:
from compiler.ast import *
from comp_util import *
from p1flattener import P1Flattener
from p2explicate import P2Explicate
from p2uniquifyvars import P2UniquifyVars
from p2heapify import P2Heapify
from p2freevars import P2FreeVars
from p2closureconvert import P2ClosureConversion
import operator
class P2Flattener(P1Flattener):
"""Class to performing flattening of complex expressions."""
def __init__ (self, varalloc, validate=False):
P1Flattener.__init__(self, varalloc)
self.validate = validate
def flatten (self, node):
"""Takes an AST as input, and then "flattens" the tree into a list of statements."""
if isinstance(node, Function):
# This is not a Function returned from the parse stage, but a top-level function
# that is created in the closure-conversion pass.
# We just need to flatten the "code" attribute, which is a Stmt.
# Function(decorators, name, argnames, defaults, flags, doc, code, lineno=None)
self.log.debug('in visit_Function, node.code = %s',node.code)
code = self.flatten(node.code)
for x in node.argnames:
self.varalloc.add_var(x)
return Function(node.decorators, node.name, node.argnames, node.defaults, node.flags, node.doc, code, node.lineno)
elif isinstance(node, Return):
retvar, retstmtlist = self.flatten(node.value)
return retstmtlist + [Return(retvar)]
elif isinstance(node, CallFuncIndirect):
self.log.debug('CallFuncIndirect: args: %s', node.args)
nodevar, nodestmtlist = self.flatten(node.node)
tuplelist = [self.flatten(x) for x in node.args]
varargs = [x[0] for x in tuplelist]
varstmts = [x[1] for x in tuplelist]
varname = self.varalloc.get_next_var()
stmts = nodestmtlist + reduce(lambda x,y: x+y, varstmts, []) + [Assign([AssName(varname, 'OP_ASSIGN')], CallFuncIndirect(nodevar, varargs))]
return (Name(varname), stmts)
else:
return P1Flattener.flatten(self, node)
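# Illustrative effect of flattening (hypothetical variable names):
# y = f(g(x)) becomes
# tmp0 = g(x)
# tmp1 = f(tmp0)
# y = tmp1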
if __name__ == "__main__":
import compiler, sys
import logging.config
from p0parser import P0Parser
from p1explicate import P1Explicate
if len(sys.argv) < 2:
sys.exit(1)
# configure logging
logging.config.fileConfig('logging.cfg')
testcases = sys.argv[1:]
for testcase in testcases:
p2unique = P2UniquifyVars()
varalloc = VariableAllocator()
p2explicator = P2Explicate(varalloc)
p2heap = P2Heapify(p2explicator)
p2closure = P2ClosureConversion(p2explicator, varalloc)
p2flatten = P2Flattener(varalloc,True)
ast = compiler.parseFile(testcase)
unique = p2unique.transform(ast)
explicated = p2explicator.explicate(unique)
heaped = p2heap.transform(explicated)
astlist = p2closure.transform(heaped)
for ast in astlist:
ast = p2flatten.flatten(ast)
print ast
print prettyAST(ast)
|
UTF-8
|
Python
| false | false | 2,011 |
2,379,411,906,925 |
c035f80ab0e0fbafbcffd058e2caf7128ac90fe4
|
0adc11a19e968072edc41ba925af7b1f34c2584b
|
/xhtml2pdf/version.py
|
0155bbc061da67c8c08d7be0e9900fa2e58145e4
|
[
"Apache-2.0"
] |
permissive
|
ejucovy/xhtml2pdf
|
https://github.com/ejucovy/xhtml2pdf
|
932f59eda037f0c376d3f38b8fb573e4fe1abf42
|
95accb611c6b0cd7bef19a3cc2112e2e65e107b5
|
refs/heads/master
| 2021-01-16T18:11:58.490411 | 2011-05-25T16:30:11 | 2011-05-25T16:30:11 | 1,799,805 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__revision__ = "$Revision: 247 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-08-15 13:37:57 +0200 (Fr, 15 Aug 2008) $"
__version__ = VERSION = "VERSION{3.0.33}VERSION"[8:-8]
__build__ = BUILD = "BUILD{2010-06-16}BUILD"[6:-6]
VERSION_STR = """XHTML2PDF/pisa %s (Build %s)
http://www.xhtml2pdf.com
Copyright 2010 Dirk Holtwick, holtwick.it
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.""" % (
VERSION,
BUILD,
)
|
UTF-8
|
Python
| false | false | 2,011 |
16,209,206,616,831 |
e0b0d58961fcc21912fa59d8b559469f7af2337f
|
a07c50240888730c0dca7575ee87dc5c243e3c41
|
/2_Data_Structures/2.5.4构建一个多线程播客客户程序.py
|
38ada97c457a9f77151efe842d3f932ac6cd0ec1
|
[] |
no_license
|
CoryVegan/PythonSL
|
https://github.com/CoryVegan/PythonSL
|
190449bc783bbba2c4b62102f145fac2057e05f8
|
f4d281d2a0d2526364b62f16c3e6b48aa7b718f2
|
refs/heads/master
| 2020-03-08T12:13:17.588156 | 2014-07-06T02:38:45 | 2014-07-06T02:38:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
#This section builds a podcast client that shows how to use the Queue class with multiple threads. The program reads one or more RSS feeds, queues the enclosures of the five most recent episodes for download, and processes several downloads in parallel with threads. The error handling here is too thin for production use, but the skeleton is a good illustration of how to use the Queue module.
#First set up some operating parameters. Normally these would come from user input (preferences, a database, etc.), but in this example the thread count and the list of URLs to fetch are hard-coded.
from Queue import Queue
from threading import Thread
import time
import urllib
import urlparse
import feedparser
#Set up some global variables
num_fetch_threads = 2
enclosure_queue = Queue()
# A real app wouldn't use hard-coded data...
feed_urls = ['http://advocacy.python.org/podcasts/littlebit.rss',]
#The function downloadEnclosures() runs in the worker threads and uses urllib to handle the downloads.
def downloadEnclosures(i, q):
"""This is the worker thread function.
It processes items in the queue one after
another. These daemon threads go into an
infinite loop, and only exit when
the main thread ends.
"""
while True:
print '%s: Looking for the next enclosure' % i
url = q.get()
parsed_url = urlparse.urlparse(url)
print '%s: Downloading:' % i, parsed_url.path
response = urllib.urlopen(url)
data = response.read()
# Save the downloaded file to the current directory
outfile_name = url.rpartition('/')[-1]
with open(outfile_name, 'wb') as outfile:
outfile.write(data)
q.task_done()
#Once the thread target function is defined, the worker threads can be started. When downloadEnclosures() reaches the statement url = q.get(), it blocks and waits until the queue returns an item, so it is safe to start the threads even while the queue is still empty.
# Set up some threads to fetch the enclosures
for i in range(num_fetch_threads):
worker = Thread(target=downloadEnclosures, args=(i, enclosure_queue,))
worker.setDaemon(True)
worker.start()
#Next, use Mark Pilgrim's feedparser module (www.feedparser.org) to fetch the feed contents and enqueue the enclosure URLs. As soon as the first URL is added to the queue, a worker thread picks it up and starts downloading. The loop keeps adding entries until the feeds are exhausted, while the worker threads keep dequeuing URLs to download.
# Download the feed(s) and put the enclosure URLs into the queue.
for url in feed_urls:
response = feedparser.parse(url, agent='fetch_podcasts.py')
for entry in response['entries'][-5:]:
for enclosure in entry.get('enclosures', []):
parsed_url = urlparse.urlparse(enclosure['url'])
print 'Queuing:', parsed_url.path
enclosure_queue.put(enclosure['url'])
#Finally, use join() to wait until the queue drains.
# Now wait for the queue to be empty, indicating that we have processed all the downloads.
print '*** Main thread waiting'
enclosure_queue.join()
print '*** Done'
|
UTF-8
|
Python
| false | false | 2,014 |
14,611,478,742,313 |
262d05321d3af9ec82d36d2ff33e2a60e3514ca9
|
41b0aa51a5adaef2f49282a00c09f29dd81e408b
|
/connecticut.py
|
3c6cfd2414ec46da61b9414a194be43979b903a3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
democracyworks/dog-catcher
|
https://github.com/democracyworks/dog-catcher
|
bfe7a30140c45e657ad474568afc44232aa2ebad
|
9f6200084d4505091399d36ab0d5e3379b04588c
|
refs/heads/master
| 2021-01-21T02:00:03.167439 | 2014-10-31T16:54:04 | 2014-10-31T16:54:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib
import re
import sys
import xlrd
import pdfminer
import urllib2
import json
import time
import os
import dogcatcher
import HTMLParser
h = HTMLParser.HTMLParser()
cdir = os.path.dirname(os.path.abspath(__file__)) + "/"
tmpdir = cdir + "tmp/"
voter_state = "CT"
source = "State"
result = [("authority_name", "first_name", "last_name", "town_name", "fips", "county_name",
"street", "city", "address_state", "zip_code",
"po_street", "po_city", "po_state", "po_zip_code",
"reg_authority_name", "reg_first", "reg_last",
"reg_street", "reg_city", "reg_state", "reg_zip_code",
"reg_po_street", "reg_po_city", "reg_po_state", "reg_po_zip_code",
"reg_phone", "reg_fax", "reg_email", "reg_website", "reg_hours",
"phone", "fax", "email", "website", "hours", "voter_state", "source", "review")]
#There are two election offices in CT; each one is in a different PDF. The following section grabs the website and writes it to a file. (Writing it to a file isn't strictly necessary, but saves some time down the line.)
file_path_1 = tmpdir + "connecticut-clerks-1.pdf"
file_path_2 = tmpdir + "connecticut-clerks-2.pdf"
url_1 = "http://www.ct.gov/sots/LIB/sots/ElectionServices/lists/TownClerkList.pdf"
url_2 = "http://www.sots.ct.gov/sots/lib/sots/electionservices/lists/rovofficeaddresses.pdf"
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent' : user_agent}
req_1 = urllib2.Request(url_1, headers=headers)
pdf_1 = urllib2.urlopen(req_1).read()
data_1 = dogcatcher.pdf_to_text(pdf_1)
output = open(file_path_1, "w")
output.write(data_1)
output.close()
req_2 = urllib2.Request(url_2, headers=headers)
pdf_2 = urllib2.urlopen(req_2).read()
data_2 = dogcatcher.pdf_to_text(pdf_2)
output = open(file_path_2, "w")
output.write(data_2)
output.close()
absdata = open(file_path_1).read()
regdata = open(file_path_2).read()
#Check to make sure that W I doesn't appear in the source documents before running.
absdata = dogcatcher.po_standardize(absdata.replace("W I","WI").replace("","").replace("ONE FIRST","1 FIRST"))
regdata = dogcatcher.po_standardize(regdata.replace("W I","WI").replace("","").replace("ONE FIRST","1 FIRST"))
absdata = absdata.replace("\nN. ","\nNorth ")
regdata = regdata.replace("N. S","North S")
header_re = re.compile(".+?\d{2}:\d{2}:\d{2} [AP]M", re.DOTALL)
for item in header_re.findall(absdata):
absdata = absdata.replace(item,"")
abstown_re = re.compile("([A-Z][A-Z].+?TOWN CLERK.+?)\n\n", re.DOTALL)
regtown_re = re.compile("REGISTRAR[S]* OF .+?CT\s*\d{5}[-\d]*\n\n", re.DOTALL)
regtown_name_re = re.compile("REGIS.+?, (.+)")
abstown_name_re = re.compile("(.+) TOWN CLERK")
party_re = re.compile(" [\[\(].+?[\)\]]")
#This will fail if the only address is a PO Box [A-Z]. Check data for this beforehand.
address_re = re.compile("([^\n]*\d.+?)\n\n",re.DOTALL)
abs_address_re = re.compile("([^\n]*\d.+?)[\s]+Bus",re.DOTALL)
csz_re = re.compile(".+?, *[A-Z][A-Z] *\d{5}[\d-]*")
city_re = re.compile("(.+?),")
state_re = re.compile(" ([A-Z][A-Z]) ")
zip_re = re.compile(" (\d{5}[\d-]*)")
po_re = re.compile("P*\.*O*\.* *[BD][OR][XA].+")
phone_re = re.compile("Bus: (.+)")
fax_re = re.compile("Fax: (.....+)")
email_re = re.compile("Email: (.+)")
name_re = re.compile(".+")
abstowns = abstown_re.findall(absdata)
regtowns = regtown_re.findall(regdata)
abse = []
reg = []
#The towns came out of the PDF in the lord only knows what order. So we first extract town names from each town and create a list of [town, town_name] pairs in both the registration and absentee data.
#We then sort both lists by town name.
for town in abstowns:
regtown = regtowns[abstowns.index(town)]
regtown_name = " ".join(regtown_name_re.findall(regtown)[0].title().strip().split())
abstown_name = " ".join(abstown_name_re.findall(town)[0].title().split())
for party in party_re.findall(abstown_name): #The town names also have a party affiliation contained within. Here, we strip that out.
abstown_name = abstown_name.replace(party,"").strip()
abse.append([town, abstown_name])
reg.append([regtown, regtown_name])
abse.sort(lambda x, y: cmp(x[1],y[1]))
reg.sort(lambda x, y: cmp(x[1],y[1]))
for item in abse:
authority_name, first_name, last_name, county_name, town_name, fips, street, city, address_state, zip_code, po_street, po_city, po_state, po_zip_code, reg_authority_name, reg_first, reg_last, reg_street, reg_city, reg_state, reg_zip_code, reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code, reg_phone, reg_fax, reg_email, reg_website, reg_hours, phone, fax, email, website, hours, review = dogcatcher.begin(voter_state)
authority_name = "Town Clerk"
reg_authority_name = "Registrar of Voters"
town = item[0]
abstown_name = item[1]
try:
regitem = reg[abse.index(item)]
except:
continue
regtown = regitem[0]
regtown_name = regitem[1]
print [abstown_name], [regtown_name]
if regtown_name == abstown_name:
town_name = abstown_name
else:
print "The lists don't match. Stopping the code."
print [abstown_name]
print [regtown_name]
sys.exit()
#This section finds the full address for the registrar of voters. After finding the address, it identifies a city/state/zip (csz) combination and a PO Box number if that exists.
#It removes both the CSZ and the PO Address (if it exists) from the full address, leaving behind a street address with some garbage.
#It then cleans up the street address and pulls the city, state, and zip out of the csz, and assigns them as appropriate to the street address and state.
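#Illustrative example of the steps above (hypothetical data, not from the PDFs):
# an address block "123 MAIN ST\nP.O. BOX 45\nHARTFORD, CT 06103" yields
# po_street "P.O. Box 45", street "123 Main St", city "Hartford",
# state "CT", zip_code "06103"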
reg_address = address_re.findall(regtown)[0]
reg_csz = csz_re.findall(reg_address)[0]
if not reg_address.replace(reg_csz,""):
reg_address = po_re.findall(regtown)[0]#The address grab will fail if address is only a PO Box [A-Z]. If there's no real address, we try this instead.
try:
reg_po_street = po_re.findall(reg_address)[0].replace(reg_csz,"").strip(", \n").title()
except:
reg_po_street = ""
reg_street = reg_address.replace(reg_po_street,"").replace(reg_csz,"")
reg_street = reg_street.replace("\n",", ").replace(" ,",",").strip(" \n/,").title()
if reg_po_street:
reg_po_city = city_re.findall(reg_csz)[0].strip().title()
reg_po_state = state_re.findall(reg_csz)[0].strip()
reg_po_zip_code = zip_re.findall(reg_csz)[0].strip().title()
if reg_street:
reg_city = city_re.findall(reg_csz)[0].strip().title()
reg_state = state_re.findall(reg_csz)[0].strip()
reg_zip_code = zip_re.findall(reg_csz)[0].strip().title()
phone = dogcatcher.find_phone(phone_re, town, areacode = "203")
if ("(203) 203-") in phone:
phone = dogcatcher.clean_phone(phone.partition(" ")[2])
print phone
email = dogcatcher.find_emails(email_re, town)
fax = dogcatcher.find_phone(fax_re, town)
official_name = name_re.findall(town)[0].title()
first_name, last_name, review = dogcatcher.split_name(official_name, review)
#This section finds the full address for the town clerk. After finding the address, it identifies a city/state/zip (csz) combination and a PO Box number if that exists.
#It removes both the CSZ and the PO Address (if it exists) from the full address, leaving behind a street address with some garbage.
#It then cleans up the street address and pulls the city, state, and zip out of the csz, and assigns them as appropriate to the street address and state.
address = abs_address_re.findall(town)[0]
csz = csz_re.findall(address)[0]
if not address.replace(csz,""):
address = po_re.findall(town)[0]#The address grab will fail if address is only a PO Box [A-Z]. If there's no real address, we try this instead.
try:
po_street = po_re.findall(address)[0].replace(csz,"").strip(", \n").title()
except:
po_street = ""
street = address.replace(po_street,"").replace(csz,"")
street = street.replace("\n",", ").replace(" ,",",").strip(" \n/,").title()
if po_street:
po_city = city_re.findall(csz)[0].strip().title()
po_state = state_re.findall(csz)[0].strip()
po_zip_code = zip_re.findall(csz)[0].strip().title()
if street:
city = city_re.findall(csz)[0].strip().title()
address_state = state_re.findall(csz)[0].strip()
zip_code = zip_re.findall(csz)[0].strip().title()
if street:
fips, county_name = dogcatcher.map_fips(city, address_state, town_name, zip_code)
else:
fips, county_name = dogcatcher.map_fips(po_city, po_state, town_name, po_zip_code)
result.append([authority_name, first_name, last_name, town_name, fips, county_name,
street, city, address_state, zip_code,
po_street, po_city, po_state, po_zip_code,
reg_authority_name, reg_first, reg_last,
reg_street, reg_city, reg_state, reg_zip_code,
reg_po_street, reg_po_city, reg_po_state, reg_po_zip_code,
reg_phone, reg_fax, reg_email, reg_website, reg_hours,
phone, fax, email, website, hours, voter_state, source, review])
#This outputs the results to a separate text file.
dogcatcher.output(result, voter_state, cdir, "cities")
|
UTF-8
|
Python
| false | false | 2,014 |
18,614,388,272,788 |
f2b03135ea726cf3aa1da7d29db783300c11f0af
|
7ae9b49a92c6c49054be8d3b2c87494a4442092c
|
/python/client.py
|
073c6034758ec2cc9cd12151ea01aa89368c4045
|
[] |
no_license
|
yanyingbing/dev
|
https://github.com/yanyingbing/dev
|
425d66ec41d9946a4aec894f17ef706acae3225a
|
e20914081ab66b86536683f8aad0980d87563702
|
refs/heads/master
| 2021-01-25T04:02:05.851971 | 2014-08-29T07:58:12 | 2014-08-29T07:58:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import socket
s=socket.socket()
s.connect(('127.0.0.1',2000))
data=s.recv(512)
print 'the data received is\n ',data
s.send('hihi I am client')
sock2 = socket.socket()
sock2.connect(('127.0.0.1',2000))
data2=sock2.recv(512)
print 'the data received from server is\n ',data2
sock2.send('client send use sock2')
sock2.close()
s.close()
|
UTF-8
|
Python
| false | false | 2,014 |
11,811,160,109,385 |
fceaca86713da70840ab5a6560fbdea8000ac6e0
|
81ff4fb051a612ea7c7e9e7733de6a86ad846504
|
/Rice_Hydrology_Ines/tableAW.py
|
b2518dbb06d0d099b2d8d30ed2ab62cbee8a9b9f
|
[] |
no_license
|
kuckaogh/testprojectoned
|
https://github.com/kuckaogh/testprojectoned
|
2e731dd2dbc994adb7775d90cd1b05b5cb2b4764
|
e7e8cb3457af7dddc5546ba8b516c843acfe3f39
|
refs/heads/master
| 2021-01-22T17:53:13.521646 | 2011-03-23T01:20:06 | 2011-03-23T01:20:06 | 32,550,118 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
Grow=['NA']
Grow_FlowT=['NA']
NonPond = ['NA']
Pond=['NA']
Decomp_FlowT = ['NA']
TableAW_file = open('table_AW.txt', 'r')
lines = TableAW_file.readlines()
for i,line in enumerate (lines[2:]): #skip title and comment
line = line.strip().split()
try:
Grow.append(float(line[1])/12)
Grow_FlowT.append(float(line[2])/12)
Pond.append(float(line[3])/12)
NonPond.append(float(line[4])/12)
Decomp_FlowT.append(float(line[5])/12)
except (ValueError, IndexError):
print "error in TableAW.py at input line %d!" % (i + 3)
|
UTF-8
|
Python
| false | false | 2,011 |
17,325,898,090,469 |
fbc96d5716472404b5ac0e6106283b5495466a4d
|
258938c24ab5f538601ecbfde9d7d733b037e8f0
|
/lib/molmod/io/gaussian03/mkinput.py
|
42976c9728313aad1e760870577fbfd1ce23a760
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
pengfeili1/Molmodsummer
|
https://github.com/pengfeili1/Molmodsummer
|
2c949e01e10d9902f68fdfd464d23dac33a6e73e
|
49d7daeefc4ae7076d7178382f77a1bf82bca4bf
|
refs/heads/master
| 2021-01-13T13:55:52.398497 | 2009-08-31T23:29:40 | 2009-08-31T23:29:40 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from molmod.data.periodic import periodic
from molmod.units import A
import os, numpy
__all__ = ["mkinput", "mkinput_multiopt"]
template="""%%chk=%(basename)s.chk
%%nproc=%(nproc)i
%%mem=%(mem)s
# %(lot)s %(route_args)s maxdisk=%(maxdisk)s NoSymm
Who cares about the title?
%(charge)i %(spin)i
%(atom_lines)s
%(post)s
"""
def mkinput(
molecule, charge, spin, lot, route_args, post, nproc, mem, maxdisk, com_filename,
center=True, overwrite=False, ghost_mask=None
):
destdir = os.path.dirname(com_filename)
basename = os.path.basename(com_filename)
if basename.endswith(".com"):
basename = basename[:-4]
if not os.path.isdir(destdir):
os.makedirs(destdir)
com_filename = os.path.join(destdir, "%s.com" % basename)
if not os.path.isfile(com_filename) or overwrite:
if molecule is None:
atom_lines = "${atom_lines}"
else:
coordinates = numpy.array(molecule.coordinates)
if center:
# move the coordinates to the origin
coordinates -= molecule.coordinates.mean(0)
symbols = [periodic[number].symbol for number in molecule.numbers]
# Optionally set ghost atoms:
if ghost_mask is not None:
for i in xrange(len(symbols)):
if ghost_mask[i]:
symbols[i] = "%s-Bq" % symbols[i]
atom_lines = "\n".join("% 5s % 12.7f % 12.7f % 12.7f" % (
symbol, cor[0], cor[1], cor[2]
) for symbol, cor in zip(symbols, coordinates/A))
# Write an xyz file
molecule.write_to_file(os.path.join(destdir, "geom.xyz"))
f = file(com_filename, "w")
f.write(template % {
"basename": basename,
"nproc": nproc,
"mem": mem,
"lot": lot,
"route_args": route_args,
"maxdisk": maxdisk,
"charge": charge,
"spin": spin,
"atom_lines": atom_lines,
"post": post,
})
f.close()
template_multiopt_top="""%%chk=%(basename)s.chk
%%nproc=%(nproc)i
%%mem=%(mem)s
# %(lot)s opt=ModRedundant maxdisk=%(maxdisk)s NoSymm
Who cares about the title?
%(charge)i %(spin)i
%(atom_lines)s
%(post)s
"""
template_multiopt_link="""--Link1--
%%chk=%(basename)s.chk
%%mem=%(mem)s
%%nproc=%(nproc)s
#p %(lot)s opt Geom(AllCheck) maxdisk=%(maxdisk)s NoSymm
Who cares about the title?
"""
def mkinput_multiopt(
molecule, charge, spin, lot_mem_pairs, post, nproc, maxdisk, com_filename,
center=True, overwrite=False
):
destdir = os.path.dirname(com_filename)
basename = os.path.basename(com_filename)
if basename.endswith(".com"):
basename = basename[:-4]
if len(destdir) > 0 and not os.path.isdir(destdir):
os.makedirs(destdir)
com_filename = os.path.join(destdir, "%s.com" % basename)
if not os.path.isfile(com_filename) or overwrite:
if center:
# move the coordinates to the origin
molecule.coordinates -= molecule.coordinates.mean(0)
symbols = [periodic[number].symbol for number in molecule.numbers]
f = file(com_filename, "w")
# Write a gaussian file (top)
atom_lines = "\n".join("% 2s % 12.7f % 12.7f % 12.7f" % (
periodic[number].symbol, cor[0], cor[1], cor[2]
) for number, cor in zip(molecule.numbers, molecule.coordinates/A))
lot, mem = lot_mem_pairs[0]
f.write(template_multiopt_top % {
"basename": basename,
"nproc": nproc,
"mem": mem,
"lot": lot,
"maxdisk": maxdisk,
"charge": charge,
"spin": spin,
"atom_lines": atom_lines,
"post": post,
})
for lot, mem in lot_mem_pairs[1:]:
f.write(template_multiopt_link % {
"basename": basename,
"nproc": nproc,
"mem": mem,
"lot": lot,
"maxdisk": maxdisk,
"post": post,
})
f.close()
# Write an xyz file
molecule.write_to_file(os.path.join(destdir, "geom.xyz"))
|
UTF-8
|
Python
| false | false | 2,009 |
2,061,584,308,266 |
d0a1e7a0728e398bb89ad76764bca42b699a96de
|
825b3cdd48360234cb78a7be1110c7dddee4354d
|
/paths.py
|
cabf82673df99f6fdf1ed8491a1de33efefdc722
|
[
"LGPL-2.1-only",
"PSF-2.0",
"GPL-2.0-only",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"LicenseRef-scancode-public-domain"
] |
non_permissive
|
csuarez/emesene-1.6.3-fixed
|
https://github.com/csuarez/emesene-1.6.3-fixed
|
6a525ec9fe2c98e5a7342ee0a88202faf1cb3f4f
|
064bc64e6393d08d3614d87335e6b1f0c401fb84
|
refs/heads/master
| 2020-05-17T17:13:25.911606 | 2011-11-09T17:02:15 | 2011-11-09T17:02:15 | 2,742,957 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# This file is part of emesene.
#
# Emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
DIR_SEP = os.sep
if hasattr(sys, "frozen"):
APP_PATH = os.path.dirname(sys.executable)
else:
APP_PATH = os.path.abspath(os.path.dirname(__file__))
if (os.name != 'nt'):
HOME_DIR = os.path.expanduser('~')
else:
HOME_DIR = os.path.expanduser("~").decode(sys.getfilesystemencoding())
CONF_DIR_NAME = '.config' + DIR_SEP + 'emesene1.0'
CONFIG_DIR = HOME_DIR + DIR_SEP + CONF_DIR_NAME
THEME_HOME_PATH = CONFIG_DIR + DIR_SEP + 'themes'
THEME_SYSTEM_WIDE_PATH = APP_PATH + DIR_SEP + 'themes'
DEFAULT_THEME_PATH = THEME_SYSTEM_WIDE_PATH + DIR_SEP + 'default' + DIR_SEP
PLUGINS_HOME = 'pluginsEmesene'
PLUGINS_SYSTEM_WIDE = 'plugins_base'
PLUGIN_SYSTEM_WIDE_PATH = APP_PATH + DIR_SEP + PLUGINS_SYSTEM_WIDE
PLUGIN_HOME_PATH = CONFIG_DIR + DIR_SEP + PLUGINS_HOME
SMILIES_SYSTEM_WIDE_PATH = APP_PATH + DIR_SEP + 'smilies'
SMILIES_HOME_PATH = CONFIG_DIR + DIR_SEP + 'smilies'
DEFAULT_SMILIES_PATH = SMILIES_SYSTEM_WIDE_PATH + DIR_SEP + 'default' + DIR_SEP
CONVTHEMES_SYSTEM_WIDE_PATH = APP_PATH + DIR_SEP + 'conversation_themes'
CONVTHEMES_HOME_PATH = CONFIG_DIR + DIR_SEP + 'conversation_themes'
DEFAULT_CONVTHEMES_PATH = CONVTHEMES_SYSTEM_WIDE_PATH + DIR_SEP + 'default' + DIR_SEP
LANG_PATH = APP_PATH + DIR_SEP + 'po'
SOUNDS_PATH = APP_PATH + DIR_SEP + 'sound_themes'
del os, sys
|
UTF-8
|
Python
| false | false | 2,011 |
2,018,634,661,556 |
9fe8d3879049c212113563b8532f0b5209597c93
|
9bef5ee1014cfecb1c8ac25c28718f23014c43d2
|
/filechanges/opt/musicbox/musicboxwebserver.py
|
0092c1a41a9387d7fc4b01e072677843a16e9044
|
[] |
no_license
|
drewdotpro/BananaPi-MusicBox
|
https://github.com/drewdotpro/BananaPi-MusicBox
|
966944c4854845fc8a234f931da27b47287928c2
|
a45e528fa5cab6438aeededfe2dec40c92581cf8
|
refs/heads/master
| 2021-01-15T15:32:15.024770 | 2014-11-08T17:28:26 | 2014-11-08T17:28:26 | 26,363,644 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# Webserver for musicbox functions
# (c) Wouter van Wijk 2014
# GPL 3 License
import cherrypy
from configobj import ConfigObj, ConfigObjError
from validate import Validator
import os
import jinja2
config_file = '/boot/config/settings.ini'
spec_file = '/opt/musicbox/settingsspec.ini'
template_file = '/opt/webclient/settings/index.html'
log_file = '/var/log/mopidy/mopidy.log'
class runServer(object):
#setup static files
_cp_config = {'tools.staticdir.on' : True,
'tools.staticdir.dir' : '/opt/defaultwebclient',
'tools.staticdir.index' : 'index.html',
}
@cherrypy.expose
@cherrypy.tools.allow(methods=['POST'])
def updateSettings(self, **params):
error = ''
try:
config = ConfigObj(config_file, configspec=spec_file, file_error=True)
except (ConfigObjError, IOError), e:
error = 'Could not load ini file!'
print (params)
validItems = ConfigObj(spec_file)
templateVars = {
"error": error
}
#iterate over the items, so that only valid items are processed
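#(form fields are named "<section>__<key>"; e.g. a hypothetical "network__ifname"
# field maps to config['network']['ifname'])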
for item in validItems:
for subitem in validItems[item]:
itemName = item + '__' + subitem
print itemName
if itemName in params.keys():
config[item][subitem] = params[itemName]
print params[itemName]
config.write()
#os.system("shutdown -r now")
return '<html><body><h1>Settings Saved!</h1>Rebooting MusicBox...<br/><a href="/">Back</a></body></html>'
updateSettings._cp_config = {'tools.staticdir.on': False}
@cherrypy.expose
def settings(self, **params):
templateLoader = jinja2.FileSystemLoader( searchpath = "/" )
templateEnv = jinja2.Environment( loader=templateLoader )
template = templateEnv.get_template(template_file)
error = ''
#read config file
try:
config = ConfigObj(config_file, configspec=spec_file, file_error=True)
except (ConfigObjError, IOError), e:
error = 'Could not load ini file!'
print (error)
#read values of valid items (in the spec-file)
validItems = ConfigObj(spec_file)
templateVars = {
"error": error
}
#iterate over the valid items to get them into the template
for item in validItems:
print(item)
for subitem in validItems[item]:
print('-'+subitem)
itemName = item + '__' + subitem
try:
templateVars[itemName] = config[item][subitem]
print templateVars[itemName]
except:
pass
print templateVars
return template.render ( templateVars )
settings._cp_config = {'tools.staticdir.on': False}
@cherrypy.expose
@cherrypy.tools.allow(methods=['POST'])
def haltSystem(self, **params):
os.system("shutdown -h now")
haltSystem._cp_config = {'tools.staticdir.on': False}
@cherrypy.expose
@cherrypy.tools.allow(methods=['POST'])
def rebootSystem(self, **params):
os.system("shutdown -r now")
rebootSystem._cp_config = {'tools.staticdir.on': False}
@cherrypy.expose
def log(self, **params):
page = '<html><body><h2>MusicBox/Mopidy Log (can take a while to load...)</h2>'
with open(log_file, 'r') as f:
page += '<pre>%s</pre>' % f.read()
page += '</body></html>'
return page
log._cp_config = {'tools.staticdir.on': False}
cherrypy.config.update({'server.socket_host': '0.0.0.0', 'server.socket_port': 8080 })
cherrypy.quickstart(runServer())
|
UTF-8
|
Python
| false | false | 2,014 |
10,342,281,262,536 |
28eaf581084e39cffa1849a9a5372453594d5237
|
3115014ccb77437dda47d30a7dccabbdf4f41d64
|
/lab7/main.py
|
dd083062009c6f418b9b4b8d8e4e31061be7be86
|
[
"MIT"
] |
permissive
|
largelymfs/lab_numeric_analysis
|
https://github.com/largelymfs/lab_numeric_analysis
|
d43d8c487a61a826ef022120e47e3519e8b474b7
|
9f50ecbe0ce19ccac86d34af6139e31ce7c2b5c6
|
refs/heads/master
| 2021-01-23T22:07:43.660315 | 2014-06-21T11:16:26 | 2014-06-21T11:16:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*- coding:utf-8 -*-
import equation
def solve_jacobi(A, B, times):
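"""Plain Jacobi iteration for A x = B:
x_i <- (b_i - sum_{j != i} a_ij * x_j_old) / a_ii,
run for a fixed number of sweeps (no convergence test)."""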
m = len(A)
n = len(A[0])
new = []
old = []
for i in range(n):
old.append(1.0)
new.append(0.0)
for k in range(times):
for i in range(n):
tmp = 0.0
for j in range(n):
if j != i:
tmp =tmp + float(A[i][j]) * float(old[j])
tmp = float((B[i] - tmp ))/float(A[i][i])
new[i] = tmp
for i in range(n):
old[i] = new[i]
return old
def solve_GS(A, B, times):
m = len(A)
n = len(A[0])
new = []
old = []
for i in range(n):
old.append(1.0)
new.append(0.0)
for k in range(times):
for i in range(n):
tmp = 0.0
for j in range(n):
if j > i:
tmp =tmp + float(A[i][j]) * float(old[j])
elif j < i:
tmp =tmp + float(A[i][j]) * float(new[j])
tmp = float((B[i] - tmp ))/float(A[i][i])
new[i] = tmp
for i in range(n):
old[i] = new[i]
return old
def solve_SOR(A, B, omega, times):
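"""Successive over-relaxation:
x_i <- (1 - omega) * x_i_old + omega * (Gauss-Seidel update);
omega = 1 reduces to Gauss-Seidel."""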
m = len(A)
n = len(A[0])
new = []
old = []
for i in range(n):
old.append(1.0)
new.append(0.0)
for k in range(times):
for i in range(n):
tmp = 0.0
for j in range(n):
if j > i:
tmp =tmp + float(A[i][j]) * float(old[j])
elif j < i:
tmp =tmp + float(A[i][j]) * float(new[j])
tmp = omega * float((B[i] - tmp ))/float(A[i][i])
new[i] = (1-omega) * old[i]+ tmp
for i in range(n):
old[i] = new[i]
return old
def build(elpson, n, a):
h = 1.0/float(n)
elpson = float(elpson)
a = float(a)
A = []
for i in range(n):
A.append([])
for j in range(n):
A[i].append(0.0)
for i in range(n):
if i - 1 >= 0:
A[i][i - 1] = elpson
if i + 1 < n:
A[i][i + 1] = elpson + h
A[i][i] = -(2 * elpson + h)
B = []
for i in range(n):
B.append(a * h * h)
return A, B
if __name__=="__main__":
A = [[20, 2, 3],[1, 8, 1],[2, -3, 15]]
B = [24, 12, 30]
A, B = build(0.1, 100, 0.5)
#n = len(A)
#for i in range(n):
# for j in range(n):
# print A[i][j],
# print
print solve_jacobi(A, B, 100)[-5:-1]
print solve_GS(A, B, 100)[-5:-1]
print solve_SOR(A, B, 0.8, 100)[-5:-1]
print equation.solve(A, B)[1][-5:-1]
|
UTF-8
|
Python
| false | false | 2,014 |
7,456,063,271,497 |
08d14ed384a2a844dc25d49bbbf3a950361d38e4
|
f1a79583a1afde2a8b7212a9a7ccbc8488603977
|
/automation/open/testmodules/RT/quick_start/quick_start_diy_binhello.py
|
7b782db3f682b0fa1ba48ca74dde2f8a1e2eb065
|
[] |
no_license
|
fdumpling/practices
|
https://github.com/fdumpling/practices
|
b86a5803c6c18c53643b0700d0a575f4182c9dfb
|
53d3bd69bfacd7cf73635fa787b7e390b2015c1e
|
refs/heads/master
| 2015-08-04T09:03:36.639966 | 2014-02-24T15:24:45 | 2014-02-24T15:24:45 | 6,406,427 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import os, sys
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.normpath(WORK_DIR + "/../../../")
sys.path.append(testdir)
import rhtest
import common
# user defined packages
import urllib
from quick_start_test import QuickStartTest
class QuickStartDiyBinhello(QuickStartTest):
def __init__(self, config):
rhtest.Test.__init__(self, config)
self.config.application_type = common.app_types["diy"]
self.config.application_embedded_cartridges = [ ]
self.config.summary = "[Runtime][rhc-cartridge]quick-start example: DYI-Binhello"
self.config.page = "" # means '/'
self.config.page_pattern = "Hello, World!"
def configuration_steps(self):
self.log_info("Configuring")
# Getting binhello
# Saving to $GIT_REPO/bin
print "Creating directory: %s/bin" % self.config.application_name
os.mkdir(self.config.application_name + "/bin")
print "Downloading remote binary"
remote_binary = urllib.urlopen("https://raw.github.com/openshift/openshift-diy-binhello-demo/master/binhello")
binhello_binary = open("%s/bin/binhello" % self.config.application_name, "w")
binhello_binary.write(remote_binary.read())
binhello_binary.close()
print "Adding execution permissions to the binary"
os.chmod("%s/bin/binhello" % self.config.application_name, 0755)
#Editing configuration files
start_hook_filename = "%s/.openshift/action_hooks/start" % self.config.application_name
print "Editing configuration file: " + start_hook_filename
start_hook = open(start_hook_filename, "w")
start_hook.write("#!/bin/bash\n")
start_hook.write("cd $OPENSHIFT_REPO_DIR/bin\n")
start_hook.write("nohup ./binhello >${OPENSHIFT_DIY_LOG_DIR}/binhello.log 2>&1 &\n")
start_hook.close()
os.chmod(start_hook_filename, 0755)
stop_hook_filename = "%s/.openshift/action_hooks/stop" % self.config.application_name
print "Editing configuration file: " + stop_hook_filename
stop_hook = open(stop_hook_filename, "w")
stop_hook.write("#!/bin/bash\n")
stop_hook.write("kill `ps -ef | grep binhello | grep -v grep | awk '{ print $2 }'` > /dev/null 2>&1\n")
stop_hook.write("exit 0\n")
stop_hook.close()
os.chmod(stop_hook_filename, 0755)
def pre_deployment_steps(self):
self.log_info("Performing additional step before deploying")
steps = [
"cd %s" % self.config.application_name,
"git add .",
"git commit -a -m testing"
]
ret_code = common.command_get_status(" && ".join(steps))
class OpenShiftTestSuite(rhtest.TestSuite):
pass
def get_suite(conf):
suite = OpenShiftTestSuite(conf)
suite.add_test(QuickStartDiyBinhello)
return suite
def run(conf):
suite = get_suite(conf)
suite()
|
UTF-8
|
Python
| false | false | 2,014 |
60,129,564,727 |
9280b1084782e65549a0a4d242644c9e716a1e20
|
8e7a3c63120a9bfb04342d7c98a564c804e3bb45
|
/assignment4.py
|
5f59a3f1f81c1fba17bd5cc554e9276fa2e16b6e
|
[] |
no_license
|
pombredanne/reinforcement-learning
|
https://github.com/pombredanne/reinforcement-learning
|
857452356e1ceb48ebc39164866e63b1c604c279
|
38053d305e5dffcdd587585ba235c0ce5dc27564
|
refs/heads/master
| 2017-06-01T14:17:43.218858 | 2013-12-19T12:47:14 | 2013-12-19T12:47:14 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import animate, numpy, pylab, random
#transition matrix initialization
trans = ((1, 3 ,4 ,12),
(0, 2, 5, 13),
(3, 1, 6, 14),
(2, 0, 7, 15),
(5, 7, 0, 8),
(4, 6, 1, 9),
(7, 5, 2, 10),
(6, 4, 3, 11),
(9, 11, 12, 4),
(8, 10, 13, 5),
(11, 9, 14, 6),
(10, 8, 15, 7),
(13, 15, 8, 0),
(12, 14, 9, 1),
(15, 13, 10, 2),
(14, 12, 11, 3))
#several reward matrices initialization
'''rew = ((1, 0, 1, 0),
(-1, 1, 0, 0),
(1, -1, 0, 0),
(-1, 0, 1, 0),
(0, 0, -1, 1),
(-1, 1, -1, 1),
(1, -1, 1, 0),
(0, 0, -1, 1),
(0, -1, 1, 0),
(0, 1, 1, -1),
(1, -1, 1, -1),
(0, 0, 1, -1),
(1, 0, -1, 0),
(-1, 1, 0, 0),
(1, -1, 0, 0),
(-1, 0, -1, 0))
'''
'''rew = ((0, 3, 0, 3),
(-1, 0, -1, 3),
(0, -1, -3, 3),
(-2, -1, 0, 3),
(-3, 1, -1, 0),
(0, 0, 0, 0),
(0, 0, 0, 0),
(-3, -1, -1, 0),
(-3, 3, 1, -1),
(0, 0, 0, 0),
(0, 0, 0, 0),
(-3, -1, 3, -1),
(0, 3, -1, -1),
(-1, 1, -3, -1),
(1, -1, -3, -1),
(-1, -1, -1, -1))'''
'''rew = ((0, 0, 0, 0),
(-1, 0, -1, 0),
(0, -1, -1, 0),
(-1, -1, 0, 1),
(-1, 1, -1, 0),
(0, 0, 0, 0),
(0, 0, 0, 0),
(-1, -1, -1, 0),
(-1, 0, 0, -1),
(0, 0, 0, 0),
(0, 0, 0, 0),
(-1, -1, 1, -1),
(0, 1, -1, -1),
(-1, 0, -1, -1),
(1, -1, -1, -1),
(-1, 0, -1, 0))'''
rew = ((0, -1, 0, -1), # 0
(-2, 0, -2, -1), # 1
(0, -1, -2, -1), # 2
(-2, -1, 2, -1), # 3
(-2, -1, -2, 0), # 4
(0, 0, 0, 0), # 5
(0, -1, 0, 0), # 6
(-2, -1, -2, 0), # 7
( -2, -1, 0, -1), # 8
( 0, 0, 0, -1), # 9
( 0, -1, 0, -1), # 10
( -2, 0, 0, -1), # 11
( 2, -1, -2, -1), # 12
( -2, 0, -2, -1), # 13
( -2, -1, -2, 0), # 14
( 0, -1, 0, -1))
def argmax(f, args):
'''
Return the element of args that maximizes f (like MATLAB's argmax,
but over an arbitrary iterable)
'''
mi = None
m = -1e10
for i in args:
v = f(i)
if v > m:
m = v
mi = i
return mi
policy = [None for s in trans]
value = [0 for s in trans]
gamma = 0.1
#policy iteration implementation
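#Each sweep does policy improvement, pi(s) = argmax_a [ r(s,a) + gamma * V(T(s,a)) ],
#followed by policy evaluation, V(s) = r(s,pi(s)) + gamma * V(T(s,pi(s)))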
for p in range(100):
for s in range(len(policy)):
policy[s] = argmax(lambda(a): rew[s][a] + gamma * value[trans[s][a]], range(4))
for s in range(len(value)):
a = policy[s]
value[s] = rew[s][a] + gamma * value[trans[s][a]]
#sequence from policy retrieval
sequence = []
sequence.append(0)
for i in range(len(policy)):
sequence.append(trans[i][policy[i]])
print sequence
#loading all the available images
images = (pylab.imread('step1.png'),
pylab.imread('step2.png'),
pylab.imread('step3.png'),
pylab.imread('step4.png'),
pylab.imread('step5.png'),
pylab.imread('step6.png'),
pylab.imread('step7.png'),
pylab.imread('step8.png'),
pylab.imread('step9.png'),
pylab.imread('step10.png'),
pylab.imread('step11.png'),
pylab.imread('step12.png'),
pylab.imread('step13.png'),
pylab.imread('step14.png'),
pylab.imread('step15.png'),
pylab.imread('step16.png'))
#visualization of the robot walk
#comic = numpy.concatenate([images[i] for i in sequence], axis=1)
#pylab.imshow(comic)
#pylab.show()
animate.draw(sequence)
class Environment :
'''
Representation of the environment for the Q-learning algorithm
'''
def __init__(self, state=0):
self.state = state
self.trans = ((1, 3 ,4 ,12),
(0, 2, 5, 13),
(3, 1, 6, 14),
(2, 0, 7, 15),
(5, 7, 0, 8),
(4, 6, 1, 9),
(7, 5, 2, 10),
(6, 4, 3, 11),
(9, 11, 12, 4),
(8, 10, 13, 5),
(11, 9, 14, 6),
(10, 8, 15, 7),
(13, 15, 8, 0),
(12, 14, 9, 1),
(15, 13, 10, 2),
(14, 12, 11, 3))
self.rew = ((0, -1, 0, -1), # 0
(-2, 0, -2, -1), # 1
(0, -1, -2, -1), # 2
(-2, -1, 2, -1), # 3
(-2, -1, -2, 0), # 4
(0, 0, 0, 0), # 5
(0, -1, 0, 0), # 6
(-2, -1, -2, 0), # 7
( -2, -1, 0, -1), # 8
( 0, 0, 0, -1), # 9
( 0, -1, 0, -1), # 10
( -2, 0, 0, -1), # 11
( 2, -1, -2, -1), # 12
( -2, 0, -2, -1), # 13
( -2, -1, -2, 0), # 14
( 0, -1, 0, -1))
def go(self, a):
'''
performing one step of the robot
'''
r = self.rew[self.state][a]
self.state = self.trans[self.state][a]
return self.state, r
#Q-learning implementation
environment = Environment()
epsilon = 0.7
Q = numpy.zeros((16,4))
stepSize = 0.2
discount = 0.8
state = 0
for p in range(10000):
transition = []
action = 0
if random.random() > epsilon:
action = random.randint(0, 3)
transition = environment.go(action)
else:
action = argmax(lambda(a): Q[state][a], range(4))
transition = environment.go(action)
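#Q-learning update: Q(s,a) += stepSize * (r + discount * max_a' Q(s',a') - Q(s,a))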
Q[state][action] = Q[state][action] + stepSize * (transition[1] + discount * Q[transition[0]][argmax(lambda(a): Q[transition[0]][a], range(4))] - Q[state][action])
state = transition[0]
print Q
#retrieve the path from the initial state
sequence = []
sequence.append(state)
for i in range(16):
action = argmax(lambda(a): Q[state][a], range(4))
#print action
transition = environment.go(action)
#print transition
sequence.append(transition[0])
state = transition[0]
print sequence
animate.draw(sequence)
#visualization of the robot walk
comic = numpy.concatenate([images[i] for i in sequence], axis=1)
pylab.imshow(comic)
pylab.show()
|
UTF-8
|
Python
| false | false | 2,013 |
10,874,857,237,779 |
3bd714d4f7038721aa03ddb7b5dec83288c00195
|
7278b31ebd6362bebf6986c2f3eca89d87201eb2
|
/exp/sandbox/recommendation/NimfaFactorise.py
|
5414b3d271e932fd4c73c260ad4018ebfa147209
|
[] |
no_license
|
malcolmreynolds/APGL
|
https://github.com/malcolmreynolds/APGL
|
c19827b1b834d3491d98a751c91838177aedc29e
|
1703510cbb51ec6df0efe1de850cd48ef7004b00
|
refs/heads/master
| 2020-12-25T05:52:45.826947 | 2013-03-26T12:30:00 | 2013-03-26T12:30:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
A wrapper for the matrix factorisation in Nimfa.
"""
import nimfa
from apgl.util.Parameter import Parameter
from apgl.util.MCEvaluator import MCEvaluator
from exp.sandbox.recommendation.AbstractMatrixCompleter import AbstractMatrixCompleter
class NimfaFactorise(AbstractMatrixCompleter):
def __init__(self, method, rank=10, maxIter=10):
"""
Intialise the matrix factorisation with a given algorithm, rank and
max number of iterations. The rank can be a 1d array in which case
we use warm restarts to compute the full regularisation path.
"""
super(NimfaFactorise, self).__init__()
self.method = method
self.rank = rank
self.maxIter = maxIter
def setRank(self, rank):
Parameter.checkInt(rank, 1, float("inf"))
self.rank = rank
def getRank(self):
return self.rank
def setMaxIter(self, maxIter):
Parameter.checkInt(maxIter, 1, float("inf"))
self.maxIter = maxIter
def getMaxIter(self):
return self.maxIter
def learnModel(self, X):
"""
Learn X using a matrix factorisation method. If self.rank is an integer
then we factorise with that rank. If it is an array then we compute the
complete regularisation path and return a list of matrices.
"""
if isinstance(self.rank, int):
model = nimfa.mf(X, method=self.method, max_iter=self.maxIter, rank=self.rank)
fit = nimfa.mf_run(model)
W = fit.basis()
H = fit.coef()
predX = W.dot(H)
return predX
else:
predXList = []
model = nimfa.mf(X, method=self.method, max_iter=self.maxIter, rank=self.rank[0])
fit = nimfa.mf_run(model)
W = fit.basis()
H = fit.coef()
predXList.append(W.dot(H))
for i in range(1, self.rank.shape[0]):
model = nimfa.mf(X, method=self.method, max_iter=self.maxIter, rank=self.rank[i], W=W, H=H)
fit = nimfa.mf_run(model)
W = fit.basis()
H = fit.coef()
predXList.append(W.dot(H))
return predXList
def getMetricMethod(self):
return MCEvaluator.meanSqError
def copy(self):
"""
Return a new copied version of this object.
"""
nimfaFactorise = NimfaFactorise(method=self.method, rank=self.rank, maxIter=self.maxIter)
return nimfaFactorise
def name(self):
return "NimfaFactorise"
|
UTF-8
|
Python
| false | false | 2,013 |
6,176,163,001,367 |
af44d55bf703dff9a47d88eec84a26951835bc10
|
ff490702af97d76d7c517a92207e3bb0df9d5223
|
/python/ns/maya/InstancerUtil.py
|
2a904f48af619d0d181d0c6ef4fbb822509710b0
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
non_permissive
|
FaithStudio/uninstancer
|
https://github.com/FaithStudio/uninstancer
|
5c93ea3cc3130f298f2729a8ca7fd59274c1cb2c
|
4bbdefe0e3b4b28984860cd593268b3b48c90db0
|
refs/heads/master
| 2021-01-13T03:01:26.484586 | 2011-02-17T08:10:13 | 2011-02-17T08:10:13 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# The MIT License
#
# Copyright (c) 2009 James Piechota
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# File: NSfnParticleInstancer.cpp
#
# Author: Nimble Studios Inc.
#
import sys
from maya.OpenMaya import *
from maya.OpenMayaFX import *
import ns.py as nspy
import ns.py.Errors
#import NSpy
#NSpy.nsReload( "NSerrors" )
#import NSmaya
#from NSerrors import *
#NSpy.nsReload( "NSmayaErrors" )
#from NSmayaErrors import *
#kPluginName = "blahBlah"
def particle( dpInstancer ):
fInstancer = MFnInstancer( dpInstancer )
# The particle system should be connected to the instancer's
# inputPoints attribute.
#
aInputPoints = fInstancer.attribute("inputPoints")
pInputPoints = MPlug( dpInstancer.node(), aInputPoints )
sources = MPlugArray()
pInputPoints.connectedTo( sources, True, False )
if not sources.length() == 1:
raise nspy.Errors.Error("No particle system associated with the instancer.")
return sources[0].node(), sources[0].parent().logicalIndex()
def getInstance( dpInstancer, index ):
#
# Description:
# Returns the index'th instanced object.
#
# WARNING:
# If index'th element of inputHierarchyPlug does not exist
# it will be created - so make sure that index is less
# than the total number of instanced objects.
#
pInputHier = inputHierarchyPlug( dpInstancer )
plug = pInputHier.elementByLogicalIndex( index )
sources = MPlugArray()
plug.connectedTo( sources, True, False )
if sources.length() == 0:
return MObject.kNullObj
return sources[0].node()
def inputHierarchyPlug( dpInstancer ):
fInstancer = MFnInstancer( dpInstancer )
aInputHierarchy = fInstancer.attribute("inputHierarchy")
return MPlug( dpInstancer.node(), aInputHierarchy )
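# Hypothetical usage from a Maya session (assumes an instancer is selected;
# the names below come from maya.OpenMaya, imported above with *):
# sel = MSelectionList()
# MGlobal.getActiveSelectionList(sel)
# dpInstancer = MDagPath()
# sel.getDagPath(0, dpInstancer)
# particleNode, arrayIndex = particle(dpInstancer)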
|
UTF-8
|
Python
| false | false | 2,011 |
6,700,148,983,210 |
d1fb7dc9a0e839c69c53fbb8f93f69f8f0fb492b
|
14b8af56658ad55d1b7e82b972f3298027912e02
|
/bookmarkdown/bookmarkdown
|
a55349e0ff51dea194fc999cc0c7ea0a3e09c4bb
|
[
"MIT"
] |
permissive
|
guneysus/bookmarkdown
|
https://github.com/guneysus/bookmarkdown
|
b0ef49c0147466aab32de3ec37ad1451ba18f8c3
|
2c1df781bfa0c13d289ef11b637886ad84c8c615
|
refs/heads/master
| 2015-08-17T13:08:42.527513 | 2012-10-14T00:24:28 | 2012-10-14T00:24:28 | 27,396,624 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# {{{
import os, re, shutil, sys
import baker
import markdown
try:
import config
except ImportError:
config = {}
from pyquery import PyQuery as pq
from jinja2 import Environment, PackageLoader
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
env = Environment(loader=PackageLoader('bookmarkdown', 'templates'))
join = os.path.join
base_context = {
'book_title': getattr(config, 'title', ''),
'author': getattr(config, 'author', ''),
'author_url': getattr(config, 'author_url', ''),
'ga_id': getattr(config, 'ga_id', ''),
'gauges_id': getattr(config, 'gauges_id', ''),
}
md = markdown.Markdown(extensions=['toc', 'codehilite'], safe_mode=False)
# }}}
# Utilities -------------------------------------------------------------------
def mkdirs(path):
def _md(acc, next):
target = join(acc, next)
os.path.exists(target) or os.mkdir(target)
return target
reduce(_md, path.split(os.path.sep), '')
def render(template, **context):
full_context = {}
full_context.update(base_context)
full_context.update(context)
return env.get_template('%s.html' % template).render(**full_context)
# Guts ------------------------------------------------------------------------
def _get_next_prev(path, paths):
try:
i = paths.index(path)
next = ('/%s.html' % paths[i + 1]) if i < len(paths) - 1 else None
prev = ('/%s.html' % paths[i - 1]) if i > 0 else None
except ValueError:
next, prev = None, None
return next, prev
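# e.g. with paths = ['preface', 'chapters/01', 'chapters/02'] (illustrative),
# _get_next_prev('chapters/01', paths) -> ('/chapters/02.html', '/preface.html')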
def _build_html_file(path, template, paths=[], context={}):
source = '%s.markdown' % path
if not os.path.exists(source):
return
with open(source, 'r') as f:
raw_markdown = f.read()
if template == 'chapter':
raw_markdown = '[TOC]\n\n' + raw_markdown
content = md.convert(raw_markdown)
cq = pq(content)
try:
name = cq('h1').text()
except ValueError:
name = "Untitled Chapter"
if template != 'splash':
title = cq('h1').text()
else:
title = None
try:
toc = cq('.toc').html()
cq('.toc').remove()
content = unicode(cq)
except ValueError:
toc = None
next, prev = _get_next_prev(path, paths)
out = render(template, title=title, content=content, name=name, next=next,
prev=prev, toc=toc, **context)
target = join('build', 'html', '%s.html' % path)
with open(target, 'w') as f:
f.write(out)
content = pq(out)
return {'content': content, 'name': content('.content h1').text(),
'filename': path}
def _build_leanpub_file(path):
source = '%s.markdown' % path
if not os.path.exists(source):
return
with open(source, 'r') as f:
content = f.read()
content = re.sub(r' :::(\w+)',
r'{:lang="\1"}',
content)
target = join('build', 'leanpub', '%s.markdown' % path.split('/')[-1])
with open(target, 'w') as f:
f.write(content)
def _build_leanpub_book_file(front, chapters):
with open(join('build', 'leanpub', 'Book.txt'), 'w') as f:
f.write('Front:\n')
for path in front:
f.write(path + '.markdown\n')
f.write('Main:\n')
for path in chapters:
f.write(path.split('/')[-1] + '.markdown\n')
def _build_index_file(chapters):
source = 'introduction.markdown'
if not os.path.exists(source):
return
with open(source, 'r') as f:
content = markdown.markdown(f.read())
out = render('splash', content=content, chapters=chapters)
target = join('build', 'html', 'index.html')
with open(target, 'w') as f:
f.write(out)
def _copy_static():
import bookmarkdown as ugly_hack
static_src = join(os.path.dirname(ugly_hack.__file__), 'static')
static_dest = join('build', 'html', 'static')
if os.path.exists(static_dest):
shutil.rmtree(static_dest)
shutil.copytree(static_src, static_dest)
def _build_html():
mkdirs(join('build', 'html', 'chapters'))
_copy_static()
paths = ['preface', 'acknowledgements']
for filename in os.listdir('chapters'):
if filename.endswith('.markdown'):
name = filename.rsplit('.')[0]
paths.append(join('chapters', name))
_build_html_file('license', 'single', paths)
_build_html_file('preface', 'single', paths)
_build_html_file('acknowledgements', 'single', paths)
chapters = []
for path in paths[2:]:
        chapter = _build_html_file(path, 'chapter', paths)
        if chapter:
            chapters.append(chapter)
_build_index_file(chapters)
def _build_leanpub():
mkdirs(join('build', 'leanpub'))
paths = ['preface', 'acknowledgements']
for filename in os.listdir('chapters'):
if filename.endswith('.markdown'):
name = filename.rsplit('.')[0]
paths.append(join('chapters', name))
for path in paths:
_build_leanpub_file(path)
front = ['preface', 'acknowledgements']
if 'chapters/00' in paths:
front.append('00')
chapters = paths[len(front):]
_build_leanpub_book_file(front, chapters)
# Commands -------------------------------------------------------------------------
@baker.command
def leanpub():
'''Build the LeanPub-Markdown version of the book.'''
_build_leanpub()
@baker.command
def html():
'''Build the HTML version of the book.'''
_build_html()
@baker.command
def build():
'''Build all versions of the book.'''
_build_html()
@baker.command
def serve(address='127.0.0.1', port=8000):
'''Serve the rendered book with a local webserver.
:param address: The address to bind the server to (default: 127.0.0.1)
:param port: The port to bind the server to (default: 8000)
'''
import SimpleHTTPServer, SocketServer
os.chdir(join('build', 'html'))
server = SocketServer.TCPServer((address, port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
print "Serving book at http://%s:%d" % (address, port)
server.serve_forever()
@baker.command
def watch():
'''Watch the source files and rebuild the book when changed.'''
pass
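    # A minimal polling sketch of what this could do (assumes rebuilding the
    # HTML output on any .markdown change; a real implementation might prefer
    # a file-watching library such as watchdog):
    #
    #   import time
    #   mtimes = {}
    #   while True:
    #       for root, _, files in os.walk('.'):
    #           for name in files:
    #               if name.endswith('.markdown'):
    #                   p = join(root, name)
    #                   m = os.path.getmtime(p)
    #                   if mtimes.get(p, m) != m:
    #                       _build_html()
    #                   mtimes[p] = m
    #       time.sleep(1)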
# Entry ----------------------------------------------------------------------------
if __name__ == '__main__':
baker.run()
|
UTF-8
|
Python
| false | false | 2,012 |
14,190,571,979,969 |
ecfbfd39c5011613064062a30e670c0a6e6da8e0
|
55c2852270d893866e41684b8e0d95bdd30ef77b
|
/1785.py
|
cb02f8e3a7f1af553daccd740f1787404c4edfab
|
[] |
no_license
|
tuannat/problems.acm.timus
|
https://github.com/tuannat/problems.acm.timus
|
1c887cd3dac22ddf0ea0a56ba85a7603d414ba7b
|
ec7b007aef6cc667c70d7ec52a2953828e8bf930
|
refs/heads/master
| 2016-09-10T08:12:23.922573 | 2013-08-06T09:00:31 | 2013-08-06T09:00:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
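# Timus problem 1785: map a unit count onto its size word, from "few"
# (up to 4) through "legion" (1000 and more).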
def check(x):
    if x <= 4:
        return "few"
    elif x <= 9:
        return "several"
    elif x <= 19:
        return "pack"
    elif x <= 49:
        return "lots"
    elif x <= 99:
        return "horde"
    elif x <= 249:
        return "throng"
    elif x <= 499:
        return "swarm"
    elif x <= 999:
        return "zounds"
    else:
        return "legion"
print check(int(raw_input()))
|
UTF-8
|
Python
| false | false | 2,013 |
7,516,192,805,803 |
2e0131bdcdeb644d8b75ab3563713a3341dd43d4
|
ad9b46573faee3651c88417f2892a28eecda9f96
|
/LikeTwitter/apps/notes/__init__.py
|
5753c78b9acec014320c45028ee648da6f307671
|
[] |
no_license
|
bidstrup2000/LikeTwitter
|
https://github.com/bidstrup2000/LikeTwitter
|
21bddf1a99b015449d53f3a37d50781bcd42d023
|
7f0de05903a706e088aec7d4dca42ee6fe29b817
|
refs/heads/master
| 2021-01-15T13:01:55.291471 | 2013-10-04T14:25:15 | 2013-10-04T14:25:15 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
""" Appication for displaying, searching, editing users notes """
|
UTF-8
|
Python
| false | false | 2,013 |
5,884,105,216,396 |
76611ef05e90dc2a1d321459b7b0695c0a66f134
|
eed98e0c5d25c9f77f812434de62f00a40c08306
|
/soda.py
|
7302d03a7bc48943a1d9d1eca796d826ec8be768
|
[] |
no_license
|
lucasmarshall/foodtrucks
|
https://github.com/lucasmarshall/foodtrucks
|
0e5822ec568c2770d0517c75934d266e2aa2d12e
|
a1caecc3c6a76798bd079b155be78ae97080de45
|
refs/heads/master
| 2020-06-05T05:19:47.658979 | 2013-10-19T19:30:39 | 2013-10-19T19:30:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import urllib, urllib2, copy, json, os, urlparse
from redis_cache import SimpleCache, cache_it
redis_conf = urlparse.urlparse(os.getenv('REDISTOGO_URL', 'redis://redistogo:42a7d091c0e7fbe5608e71d37c0b90cd@beardfish.redistogo.com:10647/'))
cache = SimpleCache(limit=100, expire=60 * 60, hashkeys=True, host=redis_conf.hostname, port=redis_conf.port, password=redis_conf.password)
class SodaQuery(object):
"""
Generate a query string for querying a SODA API
See: http://dev.socrata.com/docs/queries
"""
__operator_mapping = {
'gt' : '>',
'lt' : '<',
'gte': '>=',
'lte': '<=',
'eq' : '=',
}
def __init__(self, endpoint):
self.__endpoint = endpoint
self.__where = {}
self.__select = []
self.__order = None
self.__limit = None
self.__offset = None
def where(self, **kwargs):
"""
Add a where clause to a query.
Only supports ANDing the query parts for now.
Keyword arguments should be the field to include in the query, with an optional operator
If an operator is not specified, it defaults to __eq
Example:
To generate a query like SELECT * WHERE age > 32, do:
query.where(age__gt = 32)
Supported operators include:
field__gt = field > value
field__not_gt = NOT field > value
field__lt = field < value
field__not_lt = NOT field < value
field__gte = field >= value
field__not_ge = NOT field >= value
field__lte = field <= value
field__not_le = NOT field <= value
field__eq = field = value
field__not_eq = NOT field = value
field__null = is null if value is True, is not null if value is False
field__within_box = geolocate field within the box described by the value: a sequence of 4 coordinates
field__not_within_box = geolocate field NOT within the box described by the value: a sequence of 4 coordinates
"""
obj = copy.deepcopy(self)
obj.__where.update(kwargs)
return obj
def select(self, *args):
""" Add a select clause to a query """
obj = copy.deepcopy(self)
obj.__select.extend(args)
return obj
def order(self, order):
"""
Order the query by the field provided.
For an acending order, just provide the field: 'distance'
For decending order, precede the fieldname with a minus: '-distance'
"""
obj = copy.deepcopy(self)
obj.__order = order
return obj
def limit(self, limit):
""" Limit the number of results to the number provided """
obj = copy.deepcopy(self)
obj.__limit = limit
return obj
def offset(self, offset):
""" Only the results in the set from the offset onward """
obj = copy.deepcopy(self)
obj.__offset = offset
return obj
def execute(self):
""" Execute the query """
query = self.__build_query()
return self.__do_query(query)
@cache_it(cache=cache)
def __do_query(self, query):
response = self.__do_request(query)
if response.getcode() == 200:
try:
return json.loads(response.read())
            except ValueError:
                raise SodaError("Couldn't decode JSON response")
        raise SodaError("Can't fetch URL %s, got HTTP code %s" % (response.geturl(), response.getcode()))
def __do_request(self, query):
try:
return urllib2.urlopen(self.__endpoint + '?' + urllib.urlencode(query))
except urllib2.URLError, e:
raise SodaError("Can't fetch URL %s?%s: %s" % (self.__endpoint, urllib.urlencode(query), e))
    def __getitem__(self, key):
        """ Allow slicing - only supports up to 2 element slicing and non-negative indices, so no skipping! """
        obj = self
        try:
            if key.start is not None:
                assert key.start >= 0, "Only non-negative indices are supported"
                obj = obj.offset(key.start)
            if key.stop is not None:
                assert key.stop >= 0, "Only non-negative indices are supported"
                obj = obj.limit((key.stop - key.start) if key.start is not None else key.stop)
        except (AttributeError, TypeError):
            obj = obj.offset(key)
            obj = obj.limit(1)
        return obj
def __build_query(self):
query = {}
if len(self.__where):
query['$where'] = self.__build_where()
if len(self.__select):
query['$select'] = self.__build_select()
if self.__order is not None:
query['$order'] = self.__build_order()
if self.__offset is not None:
query['$offset'] = self.__offset
if self.__limit is not None:
query['$limit'] = self.__limit
return query
def __build_where(self):
queries = []
for field, value in self.__where.iteritems():
operator = 'eq'
parts = field.rsplit('__', 1)
do_not = False
if len(parts) == 2:
field, operator = parts
if operator[0:4] == 'not_':
do_not = True
_, operator = operator.split('_')
# special case for __within_box
if operator == 'within_box':
assert (isinstance(value, list) and len(value) == 4), "__within_box queries must be a sequence of four coordinates"
queries.append('within_box(%s,%s)' % (field, ','.join(value)))
continue
# special case for __null
if operator == 'null':
query = "%s IS NULL" if value else "%s IS NOT NULL"
queries.append(query % field)
continue
operator = self.__get_operator(operator)
queries.append("%s%s %s '%s'" % ("NOT " if do_not else "", field, operator, value))
return ' AND '.join(queries)
def __build_select(self):
return ', '.join(self.__select)
def __build_order(self):
if self.__order[0] == '-':
return '%s DESC' % self.__order[1:]
return self.__order
def __get_operator(self, operator):
try:
return self.__operator_mapping[operator]
except KeyError:
raise KeyError("Invalid operator %s" % operator)
class SodaError(Exception):
pass
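# Usage sketch (the endpoint URL is illustrative; each builder method returns
# a new query object, so calls chain freely):
#
#   q = SodaQuery('https://data.sfgov.org/resource/rqzj-sfat.json')
#   rows = (q.where(status__eq='APPROVED')
#            .select('applicant', 'address')
#            .order('applicant')
#            .limit(10)
#            .execute())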
|
UTF-8
|
Python
| false | false | 2,013 |
14,482,629,746,209 |
1543f73308e0a2e89448bf1573d584cb66e204e2
|
b91ec31e2f52ab7e9a52df713aafe471eac7f7d0
|
/video-gif.py
|
6a34d2d4e13d0de3c849c01df6a86fcfffe98eaf
|
[
"MIT"
] |
permissive
|
matematikaadit/utility
|
https://github.com/matematikaadit/utility
|
da4317fba92d0cd564438a262ab850263f3894f1
|
3369e727a160ad37b03093854a025609ceba565e
|
refs/heads/master
| 2020-06-02T15:48:44.316459 | 2014-03-12T22:32:59 | 2014-03-12T22:32:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import subprocess as subp
import sys
import re
import glob
def main():
    if len(sys.argv) < 2:
usage()
sys.exit(1)
generate_image2()
image_seq = get_image_seq()
dither = get_dither(image_seq)
gif_name = get_gif_name()
reply = "y"
while reply == "y":
generate_gif(image_seq, dither, gif_name)
reply = input("Generating gif again? (y/n): ")
if reply == "y":
image_seq = get_image_seq()
clear_png()
def usage():
print("usage: {} video".format(sys.argv[0]))
def get_gif_name():
gif_name = input("gif name: ")
gif_name = re.sub(r'\.gif', '', gif_name) + '.gif'
return gif_name
def generate_image2():
ss = input("start seek time (hh:mm:ss): ")
validate(ss, r"\d{,2}:\d{,2}:\d{,2}")
t = input("duration: ")
validate(t, r"\d+")
cmd = [
"ffmpeg",
"-ss", ss,
"-i", sys.argv[1],
"-t", t,
"-s", "480x270",
"-f", "image2",
"%03d.png",
]
try:
subp.call(cmd)
except:
print("Error when generating image")
print(sys.exc_info()[1])
sys.exit(1)
def validate(s, format):
if re.search(format, s):
return
print("Error in input format,")
print("Got: {}".format(s))
print("Expect: {}".format(format))
sys.exit(1)
def get_dither(img_seq):
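    # Probe ordered-dither levels from 9 down to 7 and keep the first whose
    # stacked frames fit in a 256-colour palette; otherwise fall back to 6.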
prepare_cmd = [
"-append",
"-format", "%k",
"info:"
]
dither = 6
for i in range(9, 6, -1):
prefix_cmd = get_prefix_cmd(img_seq, i)
output = try_call(prefix_cmd, prepare_cmd)
if output <= 256:
dither = i
break
return dither
def try_call(pref, prep):
try:
output = subp.check_output(pref + prep)
except:
print("Error in preparing image")
print(sys.exc_info()[1])
sys.exit(1)
return int(output)
def get_image_seq():
full = input("image sequence (start end): ")
start, end = [int(i) for i in full.split(" ")]
imger = lambda i: "{:03d}.png".format(i)
return [imger(i) for i in range(start, end, 3)]
def get_prefix_cmd(img_seq, dither):
start = [
"convert",
"-delay", "1x8",
]
end = [
"-ordered-dither", "o8x8,{}".format(dither),
"-coalesce",
"-layers", "OptimizeTransparency",
]
return start + img_seq + end
def generate_gif(img_seq, dither, gif_name):
final_cmd = [
"+map",
gif_name,
]
prefix_cmd = get_prefix_cmd(img_seq, dither)
try:
subp.call(prefix_cmd + final_cmd)
except:
print("Generating gif error")
print(sys.exc_info()[1])
sys.exit(1)
def clear_png():
png_files = glob.glob("???.png")
try:
subp.call(["rm"] + png_files)
except:
print("Clearing PNG Error")
print(sys.exc_info()[1])
sys.exit(1)
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 2,014 |
19,104,014,536,130 |
b3f9bb452947f07989a9cbcb7ea0d038ef81fb5d
|
eda8e0a1a9f9337a6ec150935530de7143f84f3c
|
/magentoerpconnect_catalog/product_category.py
|
b4d1961700a0a07fc6fe4b25f7c39d575770e273
|
[] |
no_license
|
uynil/magentoconnector
|
https://github.com/uynil/magentoconnector
|
0e261ad99c68dfeea7afe83c198776ccd4741e93
|
ec9f03c4f8952fc7dd0aa6f2ee1b0124fa156c9f
|
refs/heads/master
| 2020-12-24T14:37:06.099593 | 2014-05-27T07:51:09 | 2014-05-27T07:51:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from openerp.osv import fields, orm, osv
from openerp.addons.connector.unit.mapper import (mapping,
changed_by,
ExportMapper)
from openerp.addons.magentoerpconnect.unit.delete_synchronizer import (
MagentoDeleteSynchronizer)
from openerp.addons.magentoerpconnect.unit.export_synchronizer import (
MagentoExporter)
from openerp.addons.magentoerpconnect.backend import magento
from openerp.addons.magentoerpconnect.product_category import ProductCategoryAdapter
@magento
class ProductCategoryDeleteSynchronizer(MagentoDeleteSynchronizer):
""" Product deleter for Magento """
_model_name = ['magento.product.category']
@magento
class ProductCategoryExport(MagentoExporter):
_model_name = ['magento.product.category']
@magento
class ProductCategoryExportMapper(ExportMapper):
_model_name = 'magento.product.category'
direct = [('description', 'description'),
# ('country', 'country'),
              ('name', 'name'),  # TODO: use a @mapping method instead; the top-level category has no name
]
@mapping
def sort(self, record):
return {'default_sort_by':'price', 'available_sort_by': 'price'}
@mapping
def parent(self, record):
""" Magento root category's Id equals 1 """
parent_id = record.magento_parent_id.magento_id
if not parent_id:
parent_id = 1
return {'parent_id':parent_id}
@mapping
def active(self, record):
is_active = record['is_active']
if not is_active:
is_active = 0
return {'is_active':is_active}
@mapping
def menu(self, record):
include_in_menu = record['include_in_menu']
if not include_in_menu:
include_in_menu = 0
return {'include_in_menu':include_in_menu}
|
UTF-8
|
Python
| false | false | 2,014 |
11,244,224,408,946 |
7902937b1fe660c7d68f841cb789e47077b274b4
|
b8e6c7e165947b7f8bce9a9837c335b057199d1d
|
/fca/readwrite/xml_.py
|
c3cc774a33982968f28a14573044d00e6f40eb69
|
[] |
no_license
|
Nestynov/fca
|
https://github.com/Nestynov/fca
|
81879704611baab0938d52ad2512f0ec2010838a
|
01b89181c30a6bd32677d4d1a8a677819b738330
|
refs/heads/master
| 2020-12-01T03:05:59.601265 | 2009-11-19T22:07:08 | 2009-11-19T22:07:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""Holds functions that read a concept system from .xml file"""
import xml.parsers.expat
import fca
def read_xml(path):
"""Read concept system from valid xml file.
Examples
========
>>> cs = read_xml('tests/concepts.xml')
>>> print cs
(["u'obj1'", "u'obj2'", "u'obj3'"], ["u'attr1'"])
(["u'obj2'", "u'obj3'"], ["u'attr1'", "u'attr2'"])
(["u'obj1'"], ["u'attr1'", "u'attr3'"])
([], M)
"""
global new_obj, new_attr, cs
cs = fca.ConceptSystem()
new_obj = None
new_attr = None
objects = []
d_objects = {}
attributes = []
d_attributes = {}
new_intent = []
new_extent = []
def start_element(name, attrs):
global new_obj, new_attr
global new_extent, new_intent
if name == "object":
if "id" in attrs.keys():
new_obj = attrs["id"]
elif "ref" in attrs.keys():
new_extent.append(d_objects[attrs["ref"]])
elif name == "attribute":
if "id" in attrs.keys():
new_attr = attrs["id"]
elif "ref" in attrs.keys():
new_intent.append(d_attributes[attrs["ref"]])
elif name == "concept":
new_intent = []
new_extent = []
def end_element(name):
global cs, new_intent, new_extent
if name == "concept":
cs.append(fca.Concept(new_extent, new_intent))
new_extent = []
new_intent = []
def char_data(data):
if data[0] == "\n":
return
data = data.strip()
global new_obj, new_attr
if new_obj:
d_objects[new_obj] = repr(data)
objects.append(repr(data))
new_obj = None
elif new_attr:
d_attributes[new_attr] = repr(data)
attributes.append(repr(data))
new_attr = None
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
f = open(path)
p.ParseFile(f)
return cs
if __name__ == "__main__":
import doctest
doctest.testmod()
|
UTF-8
|
Python
| false | false | 2,009 |
1,906,965,481,960 |
6f78ecfca50a86a71e2f56365da16eb0110fc198
|
a704892d86252dde1bc0ff885ea5e7d23b45ce84
|
/addons-extra/base_partner_relation/partner_relation.py
|
abe8f3f6e39f4582bfb14acc00da45f8c5100726
|
[] |
no_license
|
oneyoung/openerp
|
https://github.com/oneyoung/openerp
|
5685bf8cce09131afe9b9b270f6cfadf2e66015e
|
7ee9ec9f8236fe7c52243b5550fc87e74a1ca9d5
|
refs/heads/master
| 2016-03-31T18:22:41.917881 | 2013-05-24T06:10:53 | 2013-05-24T06:10:53 | 9,902,716 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
class res_partner_relation(osv.osv):
_description='Partner Relation'
_name = "res.partner.relation"
_columns = {
'name': fields.selection( [ ('default','Default'),('invoice','Invoice'), ('delivery','Delivery'), ('contact','Contact'), ('other','Other') ],'Relation Type', required=True),
'partner_id': fields.many2one('res.partner', 'Main Partner', required=True, ondelete='cascade'),
'relation_id': fields.many2one('res.partner', 'Relation Partner', required=True, ondelete='cascade')
}
_defaults = {
'name' : lambda *a: 'invoice',
}
res_partner_relation()
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'relation_ids': fields.one2many('res.partner.relation', 'partner_id', 'Relations')
}
def _is_related_to(self, cr, uid, ids, toid):
related=[]
for id in ids:
cr.execute("select id from res_partner_relation where (partner_id=%s and relation_id=%s) or (partner_id=%s and relation_id=%s)" % (id,toid,toid,id))
res=cr.fetchone()
if res and len(res):
related.append(True)
else:
related.append(False)
return related
def address_get(self, cr, uid, ids, adr_pref=['default']):
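        # For each requested address type, resolve the address through the
        # related partner when a relation of that type exists; otherwise fall
        # back to the standard lookup on this partner.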
todo = []
result = {}
        cr.execute('select name,relation_id from res_partner_relation where partner_id in %s', (tuple(ids),))
adrs = dict(cr.fetchall())
for adr in adr_pref:
if adr in adrs:
adr_prov = super(res_partner, self).address_get(cr, uid, [adrs[adr]], [adr]).values()[0]
result[adr] = adr_prov
else:
todo.append(adr)
if len(todo):
result.update(super(res_partner, self).address_get(cr, uid, ids, todo))
return result
res_partner()
|
UTF-8
|
Python
| false | false | 2,013 |
10,264,971,839,105 |
fa956f83cc04b35b544fd0326ef347bbeb4485a8
|
96c6f25543d9e826191a70e1fd4e70185722e002
|
/vnccollab/theme/portlets/navigation.py
|
c1de2a303d08a6c53cdecef010ec495a40b78e27
|
[
"GPL-2.0-or-later"
] |
non_permissive
|
vnc-biz/vnccollab.theme
|
https://github.com/vnc-biz/vnccollab.theme
|
668c790186fda2d35a4d136be89c35249c9f647f
|
dc0080c555f5ef94f67c1f86707a872fd41ad5ec
|
refs/heads/master
| 2021-01-20T05:58:26.662359 | 2014-07-03T21:56:27 | 2014-07-03T21:56:27 | 18,652,520 | 0 | 0 | null | false | 2014-04-11T15:33:09 | 2014-04-10T21:30:00 | 2014-04-11T10:42:02 | 2014-04-11T10:42:01 | 0 | 0 | 1 | 1 |
JavaScript
| null | null |
from zope.interface import implements
from zope.component import getMultiAdapter
from plone.app.portlets.portlets import navigation
from plone.memoize.instance import memoize
from plone.app.layout.navigation.interfaces import INavtreeStrategy
from plone.app.layout.navigation.interfaces import INavigationQueryBuilder
from plone.app.layout.navigation.navtree import buildFolderTree
from Products.ATContentTypes.interfaces import IATFolder
from Acquisition import aq_inner
allowed_types = ('Folder', 'Large Folder', 'Large Plone Folder', 'Collection', 'Topic')
def _filter(data):
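    # Recursively prune children whose portal_type is not folder-like and
    # bubble the currentItem flag up from any pruned subtree.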
to_remove = []
selected = False
try:
for k, d in enumerate(data['children']):
if d['portal_type'] not in allowed_types:
to_remove.append(k)
else:
r, s = _filter(d)
d['children'] = r['children']
d['currentItem'] = s if not d['currentItem'] else True
for i in reversed(to_remove):
selected = data['children'][i]['currentItem']
del data['children'][i]
    except Exception:
return data, selected
return data, selected
class NavtreeStrategyBase(navigation.NavtreeStrategy):
"""Basic navigation tree strategy that does nothing.
"""
implements(INavtreeStrategy)
    def nodeFilter(self, node):
        return node['item'].getObject().portal_type in allowed_types
class Renderer(navigation.Renderer):
@memoize
def getNavTree(self, _marker=[]):
context = aq_inner(self.context)
queryBuilder = getMultiAdapter((context, self.data), INavigationQueryBuilder)
strategy = NavtreeStrategyBase(context, self.data)
result = buildFolderTree(context, obj=context, query=queryBuilder(), strategy=strategy)
result, _ = _filter(result)
return result
|
UTF-8
|
Python
| false | false | 2,014 |
6,425,271,074,857 |
08880f635fc36a21979e86da48de04ae71486950
|
cf70dd4bfc607164f30851a6b210cfc7f5f7c692
|
/ejercicio4/ejercicio4.py
|
6e10dd3b050b8d0106832c7e3fa58e79f5f09ed0
|
[] |
no_license
|
ReynaldOG/TAREA-1
|
https://github.com/ReynaldOG/TAREA-1
|
33cb7d236aae6f4c0317b0e2adc2cdd01eff1ff0
|
babaa98298ca3e59fa7f6e87afba04399cd5d347
|
refs/heads/master
| 2021-01-02T08:57:11.627245 | 2014-09-03T07:33:12 | 2014-09-03T07:33:12 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def ValorCadena(x, y):
    a = len(x)
    b = len(y)
    if a > b:
        print x
    elif a == b:
        print x + y
    else:
        print y

e = raw_input("Enter the first word: ")
f = raw_input("Enter the second word: ")
ValorCadena(e, f)
|
UTF-8
|
Python
| false | false | 2,014 |
13,357,348,294,930 |
9e7c52bc3162685a1ff0d0e73331274b65e9f0c5
|
2698796686ab966483cf529611d7f6ac0f92b6c9
|
/mmp/settings.py
|
26dc3620f1f6ba7b3bcda3070c2b8511acee07a3
|
[] |
no_license
|
ahmadix/mmp
|
https://github.com/ahmadix/mmp
|
418d45b4e2f97521a62c383088c69578f83cc1cc
|
b6469ffad88a5166498df12ed680eb283e7d56e6
|
refs/heads/master
| 2016-09-06T17:18:55.776677 | 2013-04-08T14:45:04 | 2013-04-08T14:45:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Django settings for mmp project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# SECURITY WARNING: keep the secret key used in production secret!
# Hardcoded values can leak through source control. Consider loading
# the secret key from an environment variable or a file instead.
SECRET_KEY = 'yfd1x8f07&cd(^)f9n@n@@6jpikqc_@jf1f4t*nvw#n=+wpg-o'
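# For example, a minimal sketch using an environment variable (the name
# 'DJANGO_SECRET_KEY' is just a convention, not required by Django):
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-fallback-key')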
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party apps
'south',
'facebook_comments',
# my apps
'home',
'people',
'items',
'actions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
#for facebook_comments to work
'django.core.context_processors.request',
# my context_processors
# this one for including login form in every page
"people.views.include_auth_forms",
)
ROOT_URLCONF = 'mmp.urls'
WSGI_APPLICATION = 'mmp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
HOST = ''
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'mmp_local',
'USER': 'postgres',
'PASSWORD': '123',
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
#templates directories
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# static files directories
STATICFILES_DIRS = (
('base', os.path.join(BASE_DIR, 'bootstrap')),
('css', os.path.join(BASE_DIR, 'bootstrap/css')),
('js', os.path.join(BASE_DIR, 'bootstrap/js')),
('img', os.path.join(BASE_DIR, 'bootstrap/img')),
('less', os.path.join(BASE_DIR, 'bootstrap/less')),
('sm2', os.path.join(BASE_DIR, 'sm2')),
)
# custom user model
AUTH_USER_MODEL = 'people.Person'
#media directory
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#media url
MEDIA_URL = '/media/'
LOGIN_URL = 'login_person'
LOGOUT_URL = 'front_page'
|
UTF-8
|
Python
| false | false | 2,013 |
7,524,782,744,455 |
c4f7baf7baf675e329dab5ea86941d711bb60dc5
|
b6a882577dab023b61b1c74ac3fc2481f3575d14
|
/Allura/allura/tests/model/test_monq.py
|
bf46da247cb36784979dd10adceaa1080bd3392b
|
[
"Apache-2.0"
] |
permissive
|
Straatjes/allura
|
https://github.com/Straatjes/allura
|
d48d31d26d8eb2718aa87d652dc20eabcb7da252
|
aa965a5b117fb539ca3185bb3c9090400ce988d6
|
refs/heads/master
| 2020-03-17T09:53:21.538185 | 2012-10-04T06:29:00 | 2012-10-04T06:29:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import pprint
from nose.tools import with_setup
from ming.orm import ThreadLocalORMSession
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
def setUp():
setup_basic_test()
ThreadLocalORMSession.close_all()
setup_global_objects()
M.MonQTask.query.remove({})
@with_setup(setUp)
def test_basic_task():
task = M.MonQTask.post(pprint.pformat, ([5,6],))
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
task = M.MonQTask.get()
assert task
task()
    assert task.result == '[5, 6]', task.result
|
UTF-8
|
Python
| false | false | 2,012 |
11,673,721,147,514 |
db48f76957dd9b39ad1a960ebffb17ed4b727042
|
e5e46e6ae8f3d397aa0ecf83416cb041de76fcdf
|
/example/sample_config.py
|
34b00137bae981be3dac26fb973cd867ad06a67e
|
[] |
no_license
|
khiltd/Ghola
|
https://github.com/khiltd/Ghola
|
34f0d33cc65aa7935dfa7a85b63fe7857cb35669
|
0f308fe2f847642b6198c78c5dad110ee124592e
|
refs/heads/master
| 2016-09-10T00:15:18.989304 | 2012-11-23T19:28:42 | 2012-11-23T19:28:42 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import ghola, re, colorsys
from string import Template
#Mandatory
ghola.config["input_directories"] = ["./example/input"]
ghola.config["output_directory"] = "./example/output"
#Optional
ghola.config["jinja_environment"] = {
"trim_blocks": True,
"autoescape": False,
}
#We can define simple and intelligent macros that exhibit none of
#the strangeness of Jinja2 syntax and require absolutely
#no knowledge of its bizarre, poorly documented, "needle dropping"
#parser mechanics. Just write a plain Python function that accepts the
#number of arguments you plan to pass it, and voila! You can call
#it from your template.
@ghola.ezmacro(r"\.(html|css|j2)$")
def border_radius(radius, **optional_kwargs):
#print optional_kwargs
brick = Template("-webkit-border-radius: $radius; -moz-border-radius: $radius; border-radius: $radius;")
return brick.substitute(radius=radius)
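#Since the file says these macros are callable from templates, a Jinja2
#template could presumably invoke one like any global, e.g. (assuming
#default delimiters): {{ border_radius("4px") }}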
# --- #
def hex_to_rgb(hex_color):
    if len(hex_color) == 7:
        parsed = re.search(r"(#)(\w{2})(\w{2})(\w{2})", hex_color).groups()
        return [(int(c, 16) / 255.0) for c in parsed[1:]]
    else:
        #Expand shorthand like #abc to #aabbcc before converting
        parsed = re.search(r"(#)(\w)(\w)(\w)", hex_color).groups()
        return [(int(c * 2, 16) / 255.0) for c in parsed[1:]]
# --- #
def rgb_to_hex(rgb):
return "#%02x%02x%02x" % (rgb[0] * 255, rgb[1] * 255, rgb[2] * 255)
# --- #
def alter_hex_rgb_luminosity(hex_color, delta):
rgb = hex_to_rgb(hex_color)
hls = list(colorsys.rgb_to_hls(*rgb))
#Boom boom boom
hls[1] = max(0, min(1.0, hls[1] + delta / 255.0))
#Let's go back to my room
rgb = colorsys.hls_to_rgb(*hls)
output = rgb_to_hex(rgb)
return output
# --- #
@ghola.ezmacro(r"\.(html|css|j2)$")
def shade(hex_color, delta):
return alter_hex_rgb_luminosity(hex_color, delta * -1)
# --- #
@ghola.ezmacro(r"\.(html|css|j2)$")
def tint(hex_color, delta):
return alter_hex_rgb_luminosity(hex_color, delta)
# --- #
@ghola.ezmacro(r"\.(html|css|j2)$")
def compliment(hex_color):
rgb = hex_to_rgb(hex_color)
hls = list(colorsys.rgb_to_hls(*rgb))
hls[0] += 0.5
if hls[0] > 1.0:
hls[0] -= 1.0
rgb = colorsys.hls_to_rgb(*hls)
return rgb_to_hex(rgb)
# --- #
ghola.config["rendering_context"] = {
"text_color": "#8d4242",
"heading_color": "#555",
"standard_radius": "20px"
}
|
UTF-8
|
Python
| false | false | 2,012 |
1,279,900,288,936 |
5620e92a7dcc6c0527bcecd6df4b292a6a693585
|
c29cb1bd4fb4df3cabdaf3814e2690dce338c658
|
/Deprecated Python/symptom.py
|
23c50558cbf2e006e94a2f3d1b0c6f342b5e3a7b
|
[] |
no_license
|
siucornellcup/client
|
https://github.com/siucornellcup/client
|
6f58606c35ab9aefcb1ec757b43cfdcb9b7926cc
|
1be7b2eec705315de0719c649dc6c7a6ed63b039
|
refs/heads/master
| 2021-01-13T02:11:16.634359 | 2014-06-21T23:03:44 | 2014-06-21T23:03:44 | 17,734,127 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PySide.QtCore import QObject, Signal, Property, Slot
class Symptom(QObject):
    def __init__(self):
        QObject.__init__(self)
        self.location = None
        self.intensity = None
        self.painType = None
        self.duration = None
    def set_duration(self, duration):
        self.duration = duration
        self.duration_changed.emit()
    def get_duration(self):
        return self.duration
    def set_location(self, location):
        self.location = location
        self.location_changed.emit()
    def get_location(self):
        return self.location
    def set_intensity(self, intensity):
        self.intensity = intensity
        self.intensity_changed.emit()
    def get_intensity(self):
        return self.intensity
    def set_painType(self, painType):
        self.painType = painType
        self.painType_changed.emit()
    def get_painType(self):
        return self.painType
    # PySide signals are declared as class attributes; Signal is not a decorator.
    duration_changed = Signal()
    location_changed = Signal()
    intensity_changed = Signal()
    painType_changed = Signal()
s_location = Property(unicode, get_location, set_location, notify = location_changed)
s_intensity = Property(unicode, get_intensity, set_intensity, notify = intensity_changed)
s_painType = Property(unicode, get_painType, set_painType, notify = painType_changed)
s_duration = Property(unicode, get_duration, set_duration, notify = duration_changed)
|
UTF-8
|
Python
| false | false | 2,014 |
17,274,358,499,353 |
2b607c3f9a3a3f4cfa0a6bf894a1be1afaf48011
|
e868ad77c7942de1a9e0ff9bbffa332b5dbc629f
|
/scraper.py
|
feab89d55e9cbebfc5ddba8c14a88a9fd873852a
|
[] |
no_license
|
rememberlenny/12ozprophet-scraper
|
https://github.com/rememberlenny/12ozprophet-scraper
|
3ba119632388057508a2886e94597c045c5b16d5
|
0b55dc6da4bf4f370ef0a44f5e6090bc03054ee0
|
refs/heads/master
| 2016-09-10T19:32:39.138165 | 2014-05-30T19:57:33 | 2014-05-30T19:57:33 | 20,340,710 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import scraperwiki
import requests
import lxml.html
# Constants
numberofposts = 7844  # thread pages to walk; the loop below requests one page at a time
post_iteration = 0
user_iteration = 0
image_iteration = 0
# One-time use
global savedusername, date_published, post_image
savedusername = 'null'
dateremovetitle = """N E W Y O R K C I T Y - """
dateremovere = """Re:"""
ignoredimages = set(['images/12oz/statusicon/post_old.gif','images/12oz/buttons/collapse_thead.gif','images/12oz/statusicon/post_new.gif','images/12oz/reputation/reputation_pos.gif','images/12oz/reputation/reputation_highpos.gif','images/icons/icon1.gif','images/12oz/buttons/quote.gif','clear.gif','images/12oz/attach/jpg.gif','images/12oz/reputation/reputation_neg.gif','images/12oz/reputation/reputation_highneg.gif'])
for i in range(1, numberofposts):
html = requests.get("http://www.12ozprophet.com/forum/showthread.php?t=128783&page="+ str(i)).content
dom = lxml.html.fromstring(html)
print 'Page: ' + str(i)
for posts in dom.cssselect('#posts'):
for table in posts.cssselect('table'):
try:
username = table.cssselect('a.bigusername')[0].text_content()
if username != savedusername:
if username != 'null':
savedusername = username
except IndexError:
username = 'null'
try:
post_iteration = post_iteration + 1 #my unique post id
postdate = table.cssselect('td.alt1 div.smallfont')[0].text_content()
postdate = postdate.replace(dateremovetitle, '')
postdate = postdate.replace(dateremovere, '')
postdate = postdate.strip()
date_published = postdate
# print '---'
# print savedusername +' '+ postdate + ', ID: ' + str(iteration)
except IndexError:
postdate = 'null'
for img in table.cssselect('img'):
imagesrc = img.get('src')
            if imagesrc not in ignoredimages:
image_iteration = image_iteration + 1
post_image = imagesrc
post = {
'image_id': image_iteration,
'image_url': post_image,
'post_id': post_iteration,
'user': savedusername,
'date_published': date_published,
}
print post
scraperwiki.sql.save(['image_id'], post)
|
UTF-8
|
Python
| false | false | 2,014 |
12,558,484,416,686 |
83ab670e68d0b7ea87b4ca4073c1bb1bee80fd35
|
45ba26df8594c8fa35c371e757a339d9928a2274
|
/mugqic_pipeline/python/bio/snpeff.py
|
297da3cb8960c6abdb752bd922443738812542bd
|
[] |
no_license
|
jtremblay/MUGQIC_perl
|
https://github.com/jtremblay/MUGQIC_perl
|
c65dbcda00794721e44442cbe10f753d68a5071e
|
ff15d20e58de5ceb1f6db169b592ab7a7cf8c9e5
|
refs/heads/master
| 2020-04-09T16:53:54.972968 | 2014-07-25T19:08:49 | 2014-07-25T19:08:49 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# Python Standard Modules
# MUGQIC Modules
from core.config import *
from core.job import *
def compute_effects(input, output, split=False):
output_stats = output + ".stats.csv"
job = Job([input], [output, output_stats], [['compute_effects', 'moduleVersion.java'], ['compute_effects', 'moduleVersion.snpeff']])
job.command = \
"""java -Djava.io.tmpdir={tmp_dir} {extra_java_flags} -Xmx{ram} -jar \$SNPEFF_HOME/snpEff.jar eff {options} \\
-c \$SNPEFF_HOME/snpEff.config \\
-i vcf \\
-o vcf \\
-csvStats \\
-stats {output_stats} \\
{reference_snpeff_genome} \\
{input}{output}""".format(
tmp_dir=config.param('compute_effects', 'tmpDir'),
extra_java_flags=config.param('compute_effects', 'extraJavaFlags'),
ram=config.param('compute_effects', 'ram'),
options=config.param('compute_effects', 'options', required=False),
output_stats=output_stats,
reference_snpeff_genome=config.param('compute_effects', 'referenceSnpEffGenome'),
input=input,
output=" \\\n > " + output if output else ""
)
if split:
split_output_stats = output + ".statsFile.txt"
split_job = Job([output_stats], [split_output_stats], [['compute_effects', 'moduleVersion.tools']])
split_job.command = \
"""splitSnpEffStat.awk \\
{output_stats} \\
{output_part} \\
{split_output_stats}""".format(
output_stats=output_stats,
output_part=output + ".part",
split_output_stats=split_output_stats
)
job = concat_jobs([job, split_job])
return job
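# e.g. a typical call (paths are illustrative):
#   job = compute_effects("sample.vcf", "sample.snpeff.vcf", split=True)
# The returned Job carries the snpEff command plus, when split=True, the
# splitSnpEffStat.awk step concatenated after it.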
def snpsift_annotate(input, output):
job = Job([input], [output], [['snpsift_annotate', 'moduleVersion.java'], ['snpsift_annotate', 'moduleVersion.snpeff']])
job.command = \
"""java -Djava.io.tmpdir={tmp_dir} {extra_java_flags} -Xmx{ram} -jar \$SNPEFF_HOME/SnpSift.jar annotate \\
{db_snp} \\
{input}{output}""".format(
tmp_dir=config.param('snpsift_annotate', 'tmpDir'),
extra_java_flags=config.param('snpsift_annotate', 'extraJavaFlags'),
ram=config.param('snpsift_annotate', 'ram'),
db_snp=config.param('snpsift_annotate', 'dbSnp', type='filepath'),
input=input,
output=" \\\n > " + output if output else ""
)
return job
def snpsift_dbnsfp(input, output):
job = Job([input], [output], [['snpsift_dbnsfp', 'moduleVersion.java'], ['snpsift_dbnsfp', 'moduleVersion.snpeff']])
job.command = \
"""java -Djava.io.tmpdir={tmp_dir} {extra_java_flags} -Xmx{ram} -jar \$SNPEFF_HOME/SnpSift.jar dbnsfp \\
-v {db_nsfp} \\
{input}{output}""".format(
tmp_dir=config.param('snpsift_dbnsfp', 'tmpDir'),
extra_java_flags=config.param('snpsift_dbnsfp', 'extraJavaFlags'),
ram=config.param('snpsift_dbnsfp', 'ram'),
db_nsfp=config.param('snpsift_dbnsfp', 'dbNSFP', type='filepath'),
input=input,
output=" \\\n > " + output if output else ""
)
return job
|
UTF-8
|
Python
| false | false | 2,014 |
16,844,861,739,093 |
83c55fc6b146f6b5339c3d18df78d137585054e8
|
d7760325a86c130c75adbea0208656b698cda6f2
|
/unfolder/graph/spanning_trees.py
|
0cffa24f73585e931a0bdc82bdb09981ac87542d
|
[
"GPL-3.0-only"
] |
non_permissive
|
kbendick/paper-model-tools
|
https://github.com/kbendick/paper-model-tools
|
a554c637d1edb2ae7dd42fc22568a6e27bdcc90f
|
7f92875c7fa63980f71e38c9e8f34bdab1dfdbf9
|
refs/heads/master
| 2017-12-30T00:32:52.480093 | 2014-05-09T21:22:11 | 2014-05-09T21:22:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from unfolder.graph.graph import Graph
def getSpanningTrees(graph: Graph):
return SpanningTreeIter(graph)
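# Usage sketch (assumes a Graph instance built elsewhere in the package):
#   for tree in getSpanningTrees(g):
#       ...  # yields the initial spanning tree, then every derived one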
# private
class SpanningTreeIter:
def __init__(self, graph: Graph):
self.graph = graph.copy().impl
self.graph.edges.sort(key=lambda edge: edge.nodes[0])
# initial tree
self.T_0 = graph.getSpanningTree().impl
self._entrablesForSpanningTreeEdge = Entrables(self.T_0, self.graph).getEntrablesForSpanningTreeEdge
def __iter__(self):
V = len(self.T_0.edges)
yield self.T_0
for T in self._derivedSpanningTrees(self.T_0, V - 1):
yield T
def _derivedSpanningTrees(self, T_p, k):
if k >= 0:
e_k = self.T_0.edges[k]
for g in self._entrablesForSpanningTreeEdge(T_p, e_k):
T_c = self._replaceEdge(T_p, e_k, g)
if T_c and Graph(T_c).isTree():
yield T_c
for T in self._derivedSpanningTrees(T_c, k - 1):
yield T
for T in self._derivedSpanningTrees(T_p, k - 1):
yield T
def _replaceEdge(self, T_p, e_k, g):
T_c = Graph(T_p).copy().impl
index = T_c.edges.index(e_k)
T_c.edges[index] = g
return T_c
# private
class FundamentalCuts:
def __init__(self, graph: Graph):
self._allEdges = set(graph.edges)
def getCutFromSpanningTreeEdge(self, spanningTree, cutEdge):
if not cutEdge in spanningTree.edges:
raise ValueError('edge ' + str(cutEdge) + ' is not in spanning tree ' + str(spanningTree))
newTree = Graph(spanningTree).copy().impl
candidateEdges = self._allEdges - set(newTree.edges)
res = []
cutEdgeIndex = newTree.edges.index(cutEdge)
for edge in candidateEdges:
newTree.edges[cutEdgeIndex] = edge
if Graph(newTree).isTree():
res.append(edge)
return set(res)
class Entrables:
def __init__(self, initialTree, graph: Graph):
self._initialTree = initialTree
self._initialTreeCutCache = {}
self._getFundamentalCut = FundamentalCuts(graph).getCutFromSpanningTreeEdge
def getEntrablesForSpanningTreeEdge(self, tree, edge):
initialTreeCut = self._getInitialTreeCut(edge)
treeCut = self._getFundamentalCut(tree, edge)
for elem in initialTreeCut & treeCut:
yield elem
# private
def _getInitialTreeCut(self, edge):
if not edge in self._initialTreeCutCache:
cut = self._getFundamentalCut(self._initialTree, edge)
self._initialTreeCutCache[edge] = cut
return cut
else:
return self._initialTreeCutCache[edge]
|
UTF-8
|
Python
| false | false | 2,014 |
3,710,851,770,109 |
ab59d64f4799c5a12abebafcb5c467b98d5b75ee
|
eeae24db77843f95f0a1ebd413da99baa0eecd4d
|
/sieve.py
|
9f2543b78c3e09ea4e6588aea569b0eb01e7606b
|
[] |
no_license
|
6LTM/sieve_of_eratosthenes
|
https://github.com/6LTM/sieve_of_eratosthenes
|
149bf25c00debd756c5236d124b6301e510fba72
|
cee317a24ff403239ec94dc16cad4f8250179c07
|
refs/heads/master
| 2020-11-26T21:22:43.238703 | 2013-12-11T16:39:08 | 2013-12-11T16:39:08 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
This little program calculates the prime number up to a
given range using the "sieve of eratosthenes" algorithm.
it is called with "$: python sieve.py max_range"
where "max_range" is an integer between 2 and 7900
"""
import sys
def sieve(xs, primes=None):
    """
    This function calculates the prime numbers up to a given range.
    The tested maximal range is 7900.
    """
    if primes is None:
        primes = []
    if len(xs) == 0:
        return primes
    primes.append(xs[0])
    return sieve([x for x in xs if x % xs[0] > 0], primes)
def find_primes(max_value):
"""
just a function to beautify...not really needed, but nice to have
"""
return sieve(range(2, max_value + 1))
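# e.g. find_primes(10) -> [2, 3, 5, 7]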
if __name__ == "__main__":
    if len(sys.argv) < 2:
print find_primes(50)
else:
print find_primes(int(sys.argv[1]))
|
UTF-8
|
Python
| false | false | 2,013 |
2,156,073,607,670 |
eb1a6dc6c95c7e77fde9780b950f1c1696bae914
|
8dc7bdc5df23b1940b0b6a094755d34423786041
|
/draw_long_cylinders.py
|
11582ba5b1d18403092853f1fa06b6641b9c5cf7
|
[] |
no_license
|
mens-amplio/pyprocessing-test
|
https://github.com/mens-amplio/pyprocessing-test
|
a6f50d9fac99a950db51ecb71d4b25a2aec4a3c2
|
1912c35431004a432c4917efbc13bf0fe2f8b7ed
|
refs/heads/master
| 2021-01-10T21:45:46.975686 | 2013-06-09T23:27:03 | 2013-06-09T23:27:03 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from emulator import *
from network import Network
from time import sleep
import math
import multiprocessing
import random
import time
# make a network and draw it
network = Network()
f = open("../modeling/tree.data.py")
data_tree = eval(f.read())
origin = network.add_node( 0, 0, 0 )
def resize_array(ary, index):
while len(ary) < index + 1:
ary.append(None)
def draw_tree(tree, below, lights_array = []):
lights_tree = []
for (index,rod,children) in tree:
((x1,y1,z1),(x2,y2,z2)) = rod
n1 = network.add_node( x1, y1, z1 )
n2 = network.add_node( x2, y2, z2 )
edge = ( n1, n2 )
resize_array(lights_array, index)
lights_array[index] = edge
network.add_edge( n1, n2, (255, 0, 0) )
network.add_edge( below, n1, (255, 255, 255) )
child_lights = draw_tree(children, n2, lights_array)
lights_tree.append( (edge, rod, child_lights) )
return lights_tree, lights_array
lights_tree, lights_array = draw_tree(data_tree, origin)
def select_light(ls, numbers):
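    # Walk the data tree by successive child positions; returns the stored
    # node index at that position, or None when the path runs off the tree.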
index = numbers[0]
if len(ls) <= index:
return None
if len(numbers) > 1:
return select_light(ls[index][2], numbers[1:])
else:
return ls[index][0]
light_number = select_light(data_tree, [1,1])
light = lights_array[light_number]
if light:
network.add_edge(light[0], light[1], (0,100,255) )
print("ok")
# single-threaded, for now.
display = Emulator(network, False)
external_run(display.queue, display.network)
|
UTF-8
|
Python
| false | false | 2,013 |
1,692,217,117,580 |
bb1e7471e574011b321b2b5a38807f5ccdbf2cda
|
3569b74bfb211e518baa750d46f57b03db882144
|
/AppClientConfig/LogoutUrisParser.py
|
186dd8b93d2142ff95b2e6413e94eeddc721c6b8
|
[] |
no_license
|
donaldzch/learnpython1
|
https://github.com/donaldzch/learnpython1
|
9ad3212df427e6b30233acc118df631de0dae590
|
6bb944caf8af620380c7e213d14634ecd612ae19
|
refs/heads/master
| 2021-01-22T23:20:42.604710 | 2014-03-23T12:40:34 | 2014-03-23T12:40:34 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__author__ = 'Donald'
"""
<uri>http://localhost/clear/sid</uri>
<uri>http://localhost/logout</uri>
"""
from __init__ import *
def parse(logoutUris, attributes):
AttributeParser(logoutUris, attributes, 'logoutUris').parse()
|
UTF-8
|
Python
| false | false | 2,014 |
8,418,135,916,953 |
fa3d96c5aec276c2ff22c2e99aac1712ffb9aa9f
|
010116af80009af5853bdeb68fcaa6e80d9da224
|
/bot.py
|
94f8628f965755ee224a34d338bf1076439d3cf0
|
[
"MIT"
] |
permissive
|
TheNikomo/IRCBot
|
https://github.com/TheNikomo/IRCBot
|
2579d3224c389fa33704595abdec717b340822e3
|
935cfd27092720613af5067c4d8e128ba1f5426d
|
refs/heads/master
| 2020-06-02T00:40:08.013802 | 2014-05-02T09:12:48 | 2014-05-02T09:12:48 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python2
# Initial code from Joel Rosdahl <joel@rosdahl.net> (https://pypi.python.org/pypi/irc/, scripts/testbot.py)
# Rest of the code by Niko Montonen <nikomo@nikomo.fi> and <montonen.niko@gmail.com>
import resource
import inspect
import os
import random
import string
import irc.bot
import irc.strings
import modules.bitcoin as bitcoin
import modules.news as news
from lxml import html
from lepl.apps.rfc3696 import HttpUrl
from irc.client import ip_numstr_to_quad, ip_quad_to_numstr
class ChatBot(irc.bot.SingleServerIRCBot):
def __init__(self, code, channel, nickname, server, port=6667):
irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
self.channel = channel
self.code = code
def on_nicknameinuse(self, c, e):
        raise RuntimeError("Nickname in use, failing spectacularly")
def on_welcome(self, c, e):
c.join(self.channel)
print("Self-destruct code: " + self.code)
def on_privmsg(self, c, e):
a = e.arguments[0]
if len(a) > 1:
try:
self.do_command(e, a, "private")
            except Exception, exc:
                print exc
if e.arguments[0] == self.code:
self.die()
return
def on_pubmsg(self, c, e):
a = e.arguments[0]
if len(a) > 1:
try:
self.do_command(e, a, "public")
            except Exception, exc:
                print exc
if e.arguments[0] == self.code:
self.die()
return
def do_command(self, e, cmd, target):
nick = e.source.nick.decode('utf-8')
try:
cmd = cmd.decode('utf-8')
except:
return
c = self.connection
channel = self.channel
validator = HttpUrl()
argcmd = cmd.split(" ")
if target == "private":
client = nick
else:
client = channel
if cmd == self.code:
print("Code accepted from " + nick + ", shutting off.")
self.die()
elif cmd == "!help":
c.privmsg(client, "Commands: !bitcoin, !news, !status, !help")
elif cmd == "!status":
memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
c.privmsg(client, "Currently using %s KiB of memory." % memory)
elif cmd == "!bitcoin":
if client == nick:
bitcoin.sendPrivatePrices(c, nick)
if client == channel:
bitcoin.sendPublicPrices(c, channel, nick)
bitcoin.sendPrivatePrices(c, nick)
        elif argcmd[0] == "!news":
            if len(argcmd) < 2:
                return
            url = argcmd[1]
            if validator(url):
                news.readNews(c, client, url)
            else:
                return
# elif validator(cmd):
# try:
# website = html.parse(cmd)
# title = website.find(".//title").text
# c.privmsg(client, "%s: %s" % (nick, title))
# except:
# return
def main():
import sys
if len(sys.argv) != 4:
print("Usage: " + inspect.getfile(inspect.currentframe()) + " <server[:port]> <channel> <nickname>")
sys.exit(1)
s = sys.argv[1].split(":", 1)
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
channel = sys.argv[2]
nickname = sys.argv[3]
print("Server, channel and nickname set.")
code = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(24))
print("Self-destruct code generated")
bot = ChatBot(code, channel, nickname, server, port)
print("Bot set, connecting...")
bot.start()
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 2,014 |
9,723,805,962,761 |
e3f91d244fc6d42031f6216ca324a475dcbc8979
|
71bdfb695f6fc826071d3a1bf0eb4ee632576fde
|
/blog/urls.py
|
a334c7ecc69155abc10d9aa7ac227dcc7db8882a
|
[] |
no_license
|
dayfray/Django_Test_Blog
|
https://github.com/dayfray/Django_Test_Blog
|
dc12c3598535b844fd629205b3d48e9f457758a9
|
4dd0b1be5ba5023c9642643280280069ab27ec62
|
refs/heads/master
| 2016-09-05T22:17:00.779986 | 2013-11-07T22:47:11 | 2013-11-07T22:47:11 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import patterns, include, url
from django.views.generic import ListView, DetailView
from blog.models import Post
from django.contrib.syndication.views import Feed
class BlogFeed(Feed):
title = "MySite"
description = "Whatever comes to mind"
link = "/blog/feed/"
def items(self):
return Post.objects.all().order_by("-pub_date")[:2]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.body
def item_link(self, item):
return u"/blog/%d" % item.id
urlpatterns = patterns('blog.views',
url(r'^$', ListView.as_view(
queryset = Post.objects.all().order_by("-pub_date")[:2],
template_name = "blog.html")),
url(r'^(?P<pk>\d+)$', DetailView.as_view(
model = Post,
template_name = "post.html")),
url(r'^archives/$', ListView.as_view(
queryset = Post.objects.all().order_by("-pub_date"),
template_name = "archives.html")),
url(r'^tag/(?P<tag>\w+)$', 'tagpage'),
url(r'^feed/$', BlogFeed()),
)
|
UTF-8
|
Python
| false | false | 2,013 |
13,563,506,724,958 |
72d19d40476da0c064ad3d654120c78a37c33ef7
|
80fcb8ec462738d2915c29ceab30a07b51f3361b
|
/pytest4.py
|
3faa4a3720172c7394f15fa766e450220be8f821
|
[] |
no_license
|
angellsl10/Leetcode
|
https://github.com/angellsl10/Leetcode
|
244fcc2dfaedb901d0ff7c4acb8c471c6a3f3c80
|
68656551c46fa3287f68f851b290da0904380de5
|
refs/heads/master
| 2016-09-05T22:52:46.920307 | 2014-08-20T22:56:23 | 2014-08-20T22:56:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
import time
from collections import deque
start=time.clock()
# a list comprehension is faster than repeated append:
# l = [x for x in range(1000000)]
l=[]
for i in range(1000000):
l.append(i)
elapsed1=time.clock()-start
start=time.clock()
q=deque()
for i in range(1000000):
q.append(i)
elapsed2=time.clock()-start
print ('list append',elapsed1,'deque append',elapsed2)
start=time.clock()
for i in range(1000000):
l.pop(0)
elapsed3=time.clock()-start
start=time.clock()
for i in range(1000000):
q.popleft()
elapsed4=time.clock()-start
print ('list pop0',elapsed3,'deque popleft',elapsed4)
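# Expected outcome: the appends are comparable, but list.pop(0) shifts every
# remaining element (O(n) per call) while deque.popleft() is O(1), so the
# deque loop finishes dramatically faster.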
|
UTF-8
|
Python
| false | false | 2,014 |
77,309,438,121 |
504dd9c166d52d80e046f8de198c6eca809b2efa
|
a6e571edef5fdda9adedbd2abcbcd2dd5e4c7c9c
|
/Prototype/Miscellaneous/forms.py
|
b70b31735a5ad091fa2cd54811ad95dab939cecf
|
[] |
no_license
|
1101811b/DIM3-Team-Q-Fish
|
https://github.com/1101811b/DIM3-Team-Q-Fish
|
cd5ce885db3513873ce412e219aebe62c53e2561
|
b9791c649599a0069024f37ab6dac64e97d209a2
|
refs/heads/master
| 2016-09-05T22:42:22.553807 | 2014-03-21T09:40:44 | 2014-03-21T09:40:44 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django import forms
from Miscellaneous.models import *
class EmailForm(forms.ModelForm):
class Meta:
model = Contact
class ComplaintForm(forms.ModelForm):
class Meta:
model = Complaint
|
UTF-8
|
Python
| false | false | 2,014 |
10,325,101,426,691 |
b40ace2386f3584cac3f25dc36f6216a8f32298c
|
d8d26927aa0b43b94b8c607fe4981e07df0b4202
|
/tx2/UserProfile/Views/ExtraAcademicInfo.py
|
4f6a79d7727eb8b01f8725c5dd161a1010833a65
|
[] |
no_license
|
upcomingnewton/tx2
|
https://github.com/upcomingnewton/tx2
|
342c593fe482093a471b96dab5780c93ac198c7c
|
dd5029dab111ecb1bbf99511fe5403874e442565
|
refs/heads/master
| 2021-01-01T05:49:24.014969 | 2012-10-31T16:49:02 | 2012-10-31T16:49:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
Created on 01-Aug-2012
@author: jivjot
'''
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from tx2.Users.HelperFunctions.LoginDetails import GetLoginDetails
from tx2.CONFIG import LOGGER_USER_PROFILE
from tx2.UserProfile.BusinessFunctions.ExtraAcademicInfo import ExtraAcademicInfo
from django.contrib import messages
from tx2.UserProfile.models import ExtraAcademicInfoType
from tx2.UserProfile.models import ExtraAcademicInfoDetails
import logging
import inspect
import datetime
from tx2.Misc.MIscFunctions1 import is_integer
from django.core.exceptions import ObjectDoesNotExist
Logger_User = logging.getLogger(LOGGER_USER_PROFILE)
def ExtraAcdemicInfoTypeIndex(HttpRequest):
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
return render_to_response("UserProfile/ExtraAcdemicInfoType.html",context_instance=RequestContext(HttpRequest))
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcdemicInfoTypeInsert(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "ExtraAcdemicInfoTypeName" in HttpRequest.POST:
ExtraAcademicInfoTypeName=HttpRequest.POST["ExtraAcdemicInfoTypeName"]
else:
messages.error(HttpRequest,"Error fetching data from form for ExtraAcdemicInfoTypeName");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.InsertExtraAcademicInfoType(ExtraAcademicInfoTypeName, logindetails["userid"], ip)
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcdemicInfoTypeUpdate(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "Id" in HttpRequest.POST:
Id=HttpRequest.POST["Id"]
else:
messages.error(HttpRequest,"Error fetching data from form for Id");
flag=-1;
if "ExtraAcdemicInfoTypeName" in HttpRequest.POST:
ExtraAcademicInfoTypeName=HttpRequest.POST["ExtraAcdemicInfoTypeName"]
else:
messages.error(HttpRequest,"Error fetching data from form for ExtraAcdemicInfoTypeName");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.UpdateExtraAcademicInfoType(Id, ExtraAcademicInfoTypeName, logindetails["userid"], ip);
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def FunctionalAreaTypeIndex(HttpRequest):
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
return render_to_response("UserProfile/FunctionalAreaType.html",context_instance=RequestContext(HttpRequest))
def FunctionalAreaTypeInsert(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "FunctionalAreaTypeName" in HttpRequest.POST:
FunctionalAreaTypeName=HttpRequest.POST["FunctionalAreaTypeName"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalAreaTypeName");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.InsertFunctionalAreaType(FunctionalAreaTypeName, logindetails["userid"], ip)
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def FunctionalAreaTypeUpdate(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "Id" in HttpRequest.POST:
Id=HttpRequest.POST["Id"]
else:
messages.error(HttpRequest,"Error fetching data from form for Id");
flag=-1;
if "FunctionalAreaTypeName" in HttpRequest.POST:
FunctionalAreaTypeName=HttpRequest.POST["FunctionalAreaTypeName"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalAreaTypeName");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.UpdateFunctionalAreaType(Id, FunctionalAreaTypeName, logindetails["userid"], ip)
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcademicInfoDetailsIndex(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
try:
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
yearlist=range(1985,2014);
if 'v' in HttpRequest.GET:
if is_integer(HttpRequest.GET['v']):
_id=int(HttpRequest.GET['v'])
try:
obj=ExtraAcademicInfoDetails.objects.get(User=logindetails["userid"],id=_id)
return render_to_response("UserProfile/ExtraAcademicInfoDetails.html",{'yearlist':yearlist,'ExtraAcadStatus':obj},context_instance=RequestContext(HttpRequest))
except ObjectDoesNotExist:
return render_to_response("UserProfile/ExtraAcademicInfoDetails.html",{'yearlist':yearlist,},context_instance=RequestContext(HttpRequest))
else:
return render_to_response("UserProfile/ExtraAcademicInfoDetails.html",{'yearlist':yearlist,},context_instance=RequestContext(HttpRequest))
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcademicInfoDetailsSelect(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
try:
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ObjList=ExtraAcademicInfoDetails.objects.filter(User=logindetails["userid"],State=1)
except ObjectDoesNotExist:
ObjList=[];
return render_to_response("UserProfile/ExtraAcademicInfoDetailsSelect.html",{'ObjList':ObjList},context_instance=RequestContext(HttpRequest))
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcademicInfoDetailsDelete(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
try:
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
if 'v' in HttpRequest.GET:
if is_integer(HttpRequest.GET['v']):
_id=int(HttpRequest.GET['v'])
try:
obj=ExtraAcademicInfoDetails.objects.get(User=logindetails["userid"],id=_id)
ExtraAcademicInfoObj=ExtraAcademicInfo()
result=ExtraAcademicInfoObj.DeleteExtraAcademicInfoDetails(_id,logindetails["userid"],logindetails["userid"], ip)
if(result['result']==1):
messages.info(HttpRequest,"Congrats Your details have been saved");
elif(result['result']==2):
messages.info(HttpRequest,"Does NOt Exist");
elif(result['result']==-2):
messages.error(HttpRequest,"You do not have the required privelege to this particular page.Either you do not have authenticated yourself or You have not completed your previous details ");
else:
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
return HttpResponseRedirect('/message/')
except ObjectDoesNotExist:
messages.error(HttpRequest,"Does NOt Exist");
return HttpResponseRedirect('/message/')
else:
messages.info(HttpRequest,"Does NOt Exist");
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcademicInfoDetailsInsert(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "IsOrganisation" in HttpRequest.POST:
isorganisation=1;
ExtraAcadmicInfoType_id=ExtraAcademicInfoType.objects.get(Name='Organisation').id;
else:
isorganisation=0;
ExtraAcadmicInfoType_id=ExtraAcademicInfoType.objects.get(Name='Personal').id;
if "Title" in HttpRequest.POST:
Title=HttpRequest.POST["Title"]
else:
messages.error(HttpRequest,"Error fetching data from form for Title");
flag=-1;
if "StartMonth" in HttpRequest.POST:
StartMonth=HttpRequest.POST["StartMonth"]
else:
messages.error(HttpRequest,"Error fetching data from form for StartMonth");
flag=-1;
if "StartYear" in HttpRequest.POST:
StartYear=HttpRequest.POST["StartYear"]
else:
messages.error(HttpRequest,"Error fetching data from form for StartYear");
flag=-1;
if flag!=-1:
_Start="1 "+StartMonth+" "+StartYear;
if "EndMonth" in HttpRequest.POST:
EndMonth=HttpRequest.POST["EndMonth"]
else:
messages.error(HttpRequest,"Error fetching data from form for EndMonth");
flag=-1;
if "EndYear" in HttpRequest.POST:
EndYear=HttpRequest.POST["EndYear"]
else:
messages.error(HttpRequest,"Error fetching data from form for EndYear");
flag=-1;
if flag!=-1:
End="1 "+EndMonth+" "+EndYear;
if "Organisation" in HttpRequest.POST and isorganisation==1:
_Organisation=HttpRequest.POST["Organisation"]
elif isorganisation==0:
_Organisation=''
else:
messages.error(HttpRequest,"Error fetching data from form for Organisation");
flag=-1;
if "Designation" in HttpRequest.POST and isorganisation==1:
Designation=HttpRequest.POST["Designation"]
elif isorganisation==0:
Designation=''
else:
messages.error(HttpRequest,"Error fetching data from form for Designation");
flag=-1;
if "Details" in HttpRequest.POST and isorganisation==1:
Details=HttpRequest.POST["Details"]
elif isorganisation==0:
Details=''
else:
messages.error(HttpRequest,"Error fetching data from form for Details");
flag=-1;
if "PlaceOfWork" in HttpRequest.POST and isorganisation==1:
PlaceOfWork=HttpRequest.POST["PlaceOfWork"]
elif isorganisation==0:
PlaceOfWork=''
else:
messages.error(HttpRequest,"Error fetching data from form for PlaceOfWork");
flag=-1;
if "FunctionalArea" in HttpRequest.POST:
FunctionalArea=HttpRequest.POST["FunctionalArea"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalArea");
flag=-1;
if "References" in HttpRequest.POST and isorganisation==1:
References=HttpRequest.POST["References"]
elif isorganisation==0:
References=''
else:
messages.error(HttpRequest,"Error fetching data from form for References");
flag=-1;
if "Summary" in HttpRequest.POST:
Summary=HttpRequest.POST["Summary"]
else:
messages.error(HttpRequest,"Error fetching data from form for Summary");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
if "Id" in HttpRequest.POST:
_id=int(HttpRequest.POST["Id"])
result=ExtraAcademicInfoObj.UpdateExtraAcademicInfoDetails(_id,logindetails["userid"], Title, _Start, End, _Organisation, Designation, Details, PlaceOfWork, FunctionalArea, ExtraAcadmicInfoType_id, References, Summary, logindetails["userid"], ip);
if(result['result']==-2):
messages.error(HttpRequest,"You do not have the required privelege to this particular page.Either you do not have authenticated yourself or You have not completed your previous details ");
elif(result['result']==1):
messages.info(HttpRequest,"Congrats Your details have been saved");
elif(result['result']==-4):
messages.error(HttpRequest,"Details does not exist");
elif(result['result']==-3):
messages.error(HttpRequest,"Already Exists");
else:
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
else:
result=ExtraAcademicInfoObj.InsertExtraAcademicInfoDetails(logindetails["userid"], Title, _Start, End, _Organisation, Designation, Details, PlaceOfWork, FunctionalArea, ExtraAcadmicInfoType_id, References, Summary, logindetails["userid"], ip);
if(result['result']==-2):
messages.error(HttpRequest,"You do not have the required privelege to this particular page.Either you do not have authenticated yourself or You have not completed your previous details ");
elif(result['result']==1):
messages.error(HttpRequest,"Congrats Your details have been saved");
if(result['result']==2):
dt=datetime.datetime.strptime(_Start,'%d %b %Y')
dat=datetime.date(dt.year,dt.month,dt.day)
_id=ExtraAcademicInfoDetails.objects.get(User=logindetails["userid"],Start=dat,Organisation=_Organisation).id
updateres=ExtraAcademicInfoObj.UpdateExtraAcademicInfoDetails(_id,logindetails["userid"], Title, _Start, End, _Organisation, Designation, Details, PlaceOfWork, FunctionalArea, ExtraAcadmicInfoType_id, References, Summary, logindetails["userid"], ip);
if(updateres['result']==1):
messages.error(HttpRequest,"We found a previous entry similar to Entered by You.");
messages.error(HttpRequest,"As Our System does not allow duplicate value.");
messages.error(HttpRequest,"We have updated your Entry with your current entered values");
else:
messages.error(HttpRequest,"result is %s"%updateres);
return HttpResponseRedirect('/message/')
#messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def ExtraAcademicInfoDetailsUpdate(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "Id" in HttpRequest.POST:
Id=HttpRequest.POST["Id"]
else:
messages.error(HttpRequest,"Error fetching data from form for Id");
flag=-1;
if "User_id" in HttpRequest.POST:
User_id=HttpRequest.POST["User_id"]
else:
messages.error(HttpRequest,"Error fetching data from form for User_id");
flag=-1;
if "Title" in HttpRequest.POST:
Title=HttpRequest.POST["Title"]
else:
messages.error(HttpRequest,"Error fetching data from form for Title");
flag=-1;
if "Start" in HttpRequest.POST:
Start=HttpRequest.POST["Start"]
else:
messages.error(HttpRequest,"Error fetching data from form for Start");
flag=-1;
if "End" in HttpRequest.POST:
End=HttpRequest.POST["End"]
else:
messages.error(HttpRequest,"Error fetching data from form for End");
flag=-1;
if "Organisation" in HttpRequest.POST:
Organisation=HttpRequest.POST["Organisation"]
else:
messages.error(HttpRequest,"Error fetching data from form for Organisation");
flag=-1;
if "Designation" in HttpRequest.POST:
Designation=HttpRequest.POST["Designation"]
else:
messages.error(HttpRequest,"Error fetching data from form for Designation");
flag=-1;
if "Details" in HttpRequest.POST:
Details=HttpRequest.POST["Details"]
else:
messages.error(HttpRequest,"Error fetching data from form for Details");
flag=-1;
if "PlaceOfWork_id" in HttpRequest.POST:
PlaceOfWork_id=HttpRequest.POST["PlaceOfWork_id"]
else:
messages.error(HttpRequest,"Error fetching data from form for PlaceOfWork_id");
flag=-1;
if "FunctionalArea" in HttpRequest.POST:
FunctionalArea=HttpRequest.POST["FunctionalArea"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalArea");
flag=-1;
if "ExtraAcadmicInfoType_id" in HttpRequest.POST:
ExtraAcadmicInfoType_id=HttpRequest.POST["ExtraAcadmicInfoType_id"]
else:
messages.error(HttpRequest,"Error fetching data from form for ExtraAcadmicInfoType_id");
flag=-1;
if "References" in HttpRequest.POST:
References=HttpRequest.POST["References"]
else:
messages.error(HttpRequest,"Error fetching data from form for References");
flag=-1;
if "Summary" in HttpRequest.POST:
Summary=HttpRequest.POST["Summary"]
else:
messages.error(HttpRequest,"Error fetching data from form for Summary");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.UpdateExtraAcademicInfoDetails(Id, User_id, Title, Start, End, Organisation, Designation, Details, PlaceOfWork_id, FunctionalArea, ExtraAcadmicInfoType_id, References, Summary, logindetails["userid"], ip);
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def FunctionalAreaListIndex(HttpRequest):
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
return render_to_response("UserProfile/FunctionalAreaList.html",context_instance=RequestContext(HttpRequest))
def FunctionalAreaListInsert(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "FunctionalAreaType_id" in HttpRequest.POST:
FunctionalAreaType_id=HttpRequest.POST["FunctionalAreaType_id"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalAreaType_id");
flag=-1;
if "FunctionalArea" in HttpRequest.POST:
FunctionalArea=HttpRequest.POST["FunctionalArea"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalArea");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.InsertFunctionalAreaList(FunctionalAreaType_id, FunctionalArea, logindetails["userid"], ip);
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
def FunctionalAreaListUpdate(HttpRequest):
ip = HttpRequest.META['REMOTE_ADDR']
logindetails = GetLoginDetails(HttpRequest)
print logindetails
if( logindetails["userid"] == -1):
messages.error(HttpRequest,'Please Login to continue')
return HttpResponseRedirect('/user/login/')
try:
ExtraAcademicInfoObj=ExtraAcademicInfo()
flag=1
if "Id" in HttpRequest.POST:
Id=HttpRequest.POST["Id"]
else:
messages.error(HttpRequest,"Error fetching data from form for Id");
flag=-1;
if "FunctionalAreaType_id" in HttpRequest.POST:
FunctionalAreaType_id=HttpRequest.POST["FunctionalAreaType_id"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalAreaType_id");
flag=-1;
if "FunctionalArea" in HttpRequest.POST:
FunctionalArea=HttpRequest.POST["FunctionalArea"]
else:
messages.error(HttpRequest,"Error fetching data from form for FunctionalArea");
flag=-1;
if flag==-1:
return HttpResponseRedirect('/message/')
result=ExtraAcademicInfoObj.UpdateFunctionalAreaList(Id, FunctionalAreaType_id, FunctionalArea, logindetails["userid"], ip);
messages.error(HttpRequest,"result is %s"%result);
return HttpResponseRedirect('/message/')
except Exception, ex:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
msg = ''
for i in args:
msg += "[%s : %s]" % (i,values[i])
Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2],msg))
messages.error(HttpRequest,'ERROR: ' + str(ex))
return HttpResponseRedirect('/message/')
|
UTF-8
|
Python
| false | false | 2,012 |
14,800,457,351,901 |
7ec56148ca58b2f771a89d3316cd01d3a379b73c
|
99dc4f3e1daa66674bc494c40a2eb3fee7c35cad
|
/apps/employee/catedra/models.py
|
62be5f0848fd20de287c4318843d9b981f0b6b73
|
[] |
no_license
|
Olaff/fancy-frlp
|
https://github.com/Olaff/fancy-frlp
|
eb46f21d65ab9d7bc1923a4548247136f7bc658d
|
642e91111a167285338e84415da736c050debc2f
|
refs/heads/master
| 2020-05-01T08:05:53.418284 | 2014-06-02T15:38:05 | 2014-06-02T15:38:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#-*-encoding:utf-8 -*-
#Models para Catedras
import os

from django.db import models
from django.utils.translation import gettext as _
from django_extensions.db.fields import AutoSlugField
from django.utils.text import slugify
from django.utils.encoding import smart_unicode
from django.core.urlresolvers import reverse
class InfoCatedra(models.Model):
class Meta:
abstract = True
ordering = ['-nombre']
nombre = models.CharField(max_length=50, verbose_name='Nombre')
jefe_catedra = models.CharField(max_length=50, verbose_name='Jefe de Cátedra')
jtp = models.CharField(max_length=50, verbose_name='Jefe de Trabajos Prácticos', blank=True)
sitio = models.URLField(max_length=100, verbose_name="Sitio web", help_text="Sitio web de la cátedra")
slug = AutoSlugField(_('slug'), max_length=50, unique=True, populate_from=('nombre',))
def __unicode__(self):
return smart_unicode(self.nombre)
    def filename(self):
        # The original referenced ``self.file``, which is not a field on this
        # model; ``sylabus`` (the FileField on Catedra) appears to be intended.
        return os.path.basename(self.sylabus.name)
    def save(self, *args, **kwargs):
        if not self.id:
            # Only set the slug when the object is created.
            self.slug = slugify(self.nombre)
        # super(Catedra, self).save() recursed infinitely here, because this
        # method is defined on InfoCatedra; name the defining class instead.
        super(InfoCatedra, self).save(*args, **kwargs)
class Catedra(InfoCatedra):
options_nivel = (
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
)
nivel = models.CharField(max_length=2,choices = options_nivel)
correlativas = models.ManyToManyField('self', symmetrical=False, blank=True, related_name="correlate", verbose_name="Correlativas")
bool_choices = ((True, 'Sí'), (False, 'No'))
electiva = models.BooleanField(default ='', choices = bool_choices)
carrera = models.ForeignKey('carrera.Carrera', default='')
sylabus = models.FileField(upload_to='alumnosweb/', blank=True)
comisiones = models.ManyToManyField('comision.Comision', related_name="catedras", through = 'horario.Horario')
def get_absolute_url(self):
return reverse('catedras:catedra_details', args=[self.slug])
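
# Hedged URLconf sketch matching get_absolute_url above (the regex and view
# class are assumptions; only the 'catedras:catedra_details' name is taken
# from the reverse() call):
#
#     url(r'^catedras/(?P<slug>[-\w]+)/$',
#         CatedraDetailView.as_view(), name='catedra_details'),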
|
UTF-8
|
Python
| false | false | 2,014 |
16,123,307,254,374 |
be9338a8dc076ede506099448dcbc4963c3bd749
|
105eff1ab986e9bbf1f9dee7ccb03ec9b9f3bdfe
|
/src/qsurvey.py
|
ae01141e968d6b95e0f26ea198e3a24c29f08edb
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only"
] |
non_permissive
|
rbianchi66/survey
|
https://github.com/rbianchi66/survey
|
81dedd6c442114eba7ea1226bc0af0bbb686265b
|
1de116133120948f7a48f505516a14019ce8865b
|
refs/heads/master
| 2021-01-25T04:01:26.384311 | 2013-09-13T15:39:22 | 2013-09-13T15:39:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PyQt4 import QtGui
import sys
import parms
cards = {}
class QSurvey(QtGui.QMainWindow):
def __init__(self):
super(QSurvey, self).__init__()
action = QtGui.QAction("&Action2", self)
menubar = self.menuBar()
menu1 = menubar.addMenu("&File")
menu1.addAction(action)
#menu1.addAction("&Action1")
#tb = self.addToolBar("File")
#tb.addAction(action)
self.setWindowTitle("Survey")
self.setGeometry(300, 300, 300, 200)
self.show()
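
# NOTE: the module-level ``cards`` dict and the ``parms`` import are currently
# unused; they appear to be placeholders (an assumption -- nothing in this
# file consumes them yet).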
if __name__ == "__main__":
import getopt
opts, args = [], []
try:
opts, args = getopt.getopt(sys.argv[1:], "s:", ["survey="])
except getopt.GetoptError, err:
print "Error reading options:",err
survey_file="survey.ini"
for o, a in opts:
if o in ("-s", "--survey"):
survey_file = a
app = QtGui.QApplication(sys.argv)
ali = QSurvey()
sys.exit(app.exec_())
|
UTF-8
|
Python
| false | false | 2,013 |
12,807,592,510,869 |
450552dd5d76dc6c39549e7b57d2070e05ec8d28
|
c3d0e1a6a3529c04d6daa689da7c24a38e7aeb44
|
/build/src/nox/netapps/tests/pyunittests/vlan_parse_test.py
|
a47f2792842bae450e385c02062970dbbbf2eecf
|
[
"GPL-3.0-or-later",
"GPL-3.0-only"
] |
non_permissive
|
esy2k/openflowq
|
https://github.com/esy2k/openflowq
|
22931d59ab409e1ce65f4c4dbee199985bd1fd29
|
635cbfd9da19f242f7d1f5ae5413e85ba4e5bd3b
|
refs/heads/master
| 2021-01-10T19:24:32.908051 | 2011-07-14T07:14:51 | 2011-07-14T07:14:51 | 2,040,499 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
../../../../../../src/nox/netapps/tests/pyunittests/vlan_parse_test.py
|
UTF-8
|
Python
| false | false | 2,011 |
9,053,791,089,712 |
a5bb9983f80afc06971995efee26a241d959bb1d
|
5e58ce863df76cafdb55c505878506a92f8a9fb2
|
/grid_tools/cloudy_grid_attach.py
|
c554148dba43c8dcd1c17b722636d79b0cefac4f
|
[] |
no_license
|
jwise77/hdf5_cooling_grid_tools
|
https://github.com/jwise77/hdf5_cooling_grid_tools
|
f63ddf7fd4d706d2b92ca9b6d7d494b93948717c
|
92ec22cac42794762aa0c5e874b095a17a3e583a
|
refs/heads/master
| 2020-12-29T08:27:02.589566 | 2012-11-03T20:10:17 | 2012-11-03T20:10:17 | 238,534,646 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Britton Smith <brittonsmith@gmail.com>
Routine for grafting together cooling datasets of different dimension.
"""
import h5py
import numpy as na
from hdf5_attributes import *
def graft_grid(input_lt,input_ht,outputFile,
data_fields=['Heating','Cooling','MMW'],
extra_field="metal free electron fraction"):
"""
Attach low temperature and high temperature cooling grids. The high
temperature grid will have one less dimension than the low temperature
grid, so duplicate data will be made in one dimension. Fixed electron
fractions are used to make the low temperature data, but not the high
temperature data, as this causes errors in Cloudy.
"""
# Open low temperature data and find dimension of the field not in the
# high temperature data.
extra_dim = -1
data_lt = {}
input = h5py.File(input_lt,'r')
print "Reading file: %s." % input_lt
for dataset in input.listnames():
data_lt[dataset] = input[dataset].value
attributes_lt = get_attributes(input)
input.close()
for dim in range(len(data_lt[data_fields[0]].shape)-1):
name = "Parameter%d" % (dim+1)
if attributes_lt[name]["Name"]['value'] == extra_field:
extra_dim = dim
if extra_dim < 0:
print "Field, %s, not found in %s." % (extra_field,input_lt)
return None
# Open high temperature data and create duplicate data in
# electron fraction dimension.
data_ht = {}
input = h5py.File(input_ht,'r')
print "Reading file: %s." % input_ht
for dataset in input.listnames():
data_ht[dataset] = input[dataset].value
attributes_ht = get_attributes(input)
input.close()
print "Combining datasets."
data_ht_new = {}
for dataset in data_fields:
data_ht_new[dataset] = _add_grid_dimension(data_ht[dataset],extra_dim,
(data_lt[dataset].shape)[extra_dim])
# Remove redundant temperature point.
redundant_point = False
if data_lt['Temperature'][-1] == data_ht['Temperature'][0]:
redundant_point = True
if redundant_point:
data_lt['Temperature'] = na.concatenate((data_lt['Temperature'],data_ht['Temperature'][1:]))
else:
data_lt['Temperature'] = na.concatenate((data_lt['Temperature'],data_ht['Temperature']))
attributes_lt['Temperature']["Dimension"]['value'][0] = data_lt['Temperature'].size
del data_ht
# Change dimension attribute.
for dataset in data_fields:
attributes_lt[dataset]["Dimension"]['value'][-1] = data_lt['Temperature'].size
# Concatenate datasets.
for dataset in data_fields:
if redundant_point:
data_ht_copy = data_ht_new[dataset]
data_ht_copy = na.rollaxis(data_ht_copy,(len(data_ht_copy.shape)-1),0)
data_ht_copy = data_ht_copy[1:]
data_ht_copy = na.rollaxis(data_ht_copy,0,len(data_ht_copy.shape))
else:
data_ht_copy = data_ht_new[dataset]
data_lt[dataset] = na.concatenate((data_lt[dataset],data_ht_copy),axis=-1)
# Write new dataset.
print "Writing file: %s." % outputFile
output = h5py.File(outputFile,'w')
for dataset in data_lt.keys():
output.create_dataset(dataset,data=data_lt[dataset])
write_attributes(output,attributes_lt)
output.close()
def _add_grid_dimension(grid,dimension,size):
"Add a dimension to the grid with duplicate data."
oldShape = grid.shape
newShape = list(oldShape)
newShape.reverse()
newShape.append(size)
newShape.reverse()
newGrid = na.zeros(newShape,dtype=grid.dtype)
newGrid[:] = grid
if dimension > 0:
newGrid = na.rollaxis(newGrid,0,dimension+1)
return newGrid
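
# Hedged usage sketch (file names here are hypothetical):
#
#     graft_grid('cooling_low_T.h5', 'cooling_high_T.h5', 'cooling_combined.h5',
#                data_fields=['Heating', 'Cooling', 'MMW'],
#                extra_field='metal free electron fraction')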
|
UTF-8
|
Python
| false | false | 2,012 |
11,639,361,385,945 |
18c2d1322be96edc01cb65cae065b283e09d5152
|
51047c4431a5f899e69ce48129bf0b627a66c107
|
/direg.py
|
988466b2848e7eb1e169929f86aa2e796b793df2
|
[
"GPL-2.0-only"
] |
non_permissive
|
jakejohns/py-direg
|
https://github.com/jakejohns/py-direg
|
513413d99f6e23139f6cf7423b037c08ef2c068c
|
940fe96b48739e7aef96dff1d70626cb4a68ee84
|
refs/heads/master
| 2020-05-17T03:09:16.690064 | 2014-02-10T09:23:13 | 2014-02-10T09:23:13 | 16,610,335 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
"""
Usage:
direg.py (-h|--help)
direg.py [--config=<config>] [-v...] [--log=<log>]
Regulates directories based on configuration file
Options:
--config=<config> Path to config [default: ~/.direg.py].
-v verbosity level -v, -vv, -vvv
"""
from docopt import docopt
import os, sys, time, glob, logging
import humanfriendly
from datetime import datetime
from datetime import timedelta
import collections
import calendar
__author__ = "Jake Johns"
__copyright__ = "Copyright 2014, Jake Johns"
__license__ = "GPL"
__version__ = "1.1.1"
__maintainer__ = "Jake Johns"
__email__ = "jake@jakejohns.net"
__status__ = "Production"
logger = logging.getLogger('direg')
# Default Tests
def max_size(directory):
"""Test directory for maximum size
returns true if directory size is larger than spec property "max_size"
"""
try:
max_size = humanfriendly.parse_size(directory.spec['max_size'])
except KeyError:
raise UnregulatableError('No "max_size" configured in specification!')
size = directory.size
logger.debug("Directory size is %s", humanfriendly.format_size(size))
logger.debug("max_size set to: %s", humanfriendly.format_size(max_size))
return size > max_size
def max_count(directory):
"""Test directory for maximum file count
returns true if directory contains more files than specified by spec
property "max_count"
"""
try:
max_count = int(directory.spec['max_count'])
except KeyError:
raise UnregulatableError('No "max_count" configured in specification!')
except ValueError:
raise UnregulatableError('"max_count" must be an integer!')
count = len(directory.contents)
logger.debug("File count is %s", count)
logger.debug("max_count set to: %s", max_count)
return count > max_count
def is_after(directory):
""" Test based on date input
returns true if now is greater than expiry
expiry can be callable or a string.
The resulting string is parsed by humanfriendly.parse_date
"""
try:
expiry = directory.spec['expiry']
if callable(expiry):
expiry = expiry()
expiry = datetime(*humanfriendly.parse_date(expiry))
except KeyError:
raise UnregulatableError('must specify "expiry" in spec')
except humanfriendly.InvalidDate:
raise UnregulatableError('expiry must be in format: YYYY-MM-DD [HH:MM:SS]')
now = datetime.now()
logger.debug('It is currently %s', str(now))
logger.debug('Expiry is %s', str(expiry))
return now > expiry
def is_day_of_week(directory):
""" Test returns true if today specified in directory dow
"""
try:
days = directory.spec['dow']
except KeyError:
raise UnregulatableError('Must specify "dow" in spec')
    if not isinstance(days, collections.Sequence):
        days = (days,)  # trailing comma required: (days) is not a tuple
today = datetime.today().weekday()
logger.debug('Today is %s', calendar.day_abbr[today])
logger.debug('DOW is: %s', ', '.join(calendar.day_abbr[int(d)] for d in days))
return str(today) in ''.join(str(d) for d in days)
def always(directory):
""" Always returns true
"""
return True
def never(directory):
""" Always returns false
"""
return False
# Default Solutions
def remove_old(directory):
"""Solve directory by removing oldest files
removes files until directory test returns false
"""
contents = directory.contents
while contents:
if not directory.test():
break
os.remove(contents.pop())
def remove_older_than(directory):
"""Removes files older than number of seconds given in spec expiry
"""
try:
delta = directory.spec['cutoff']
except KeyError:
raise UnregulatableError('Must specify cutoff')
if isinstance(delta, timedelta):
delta = delta.total_seconds()
try:
delta = int(delta)
except ValueError:
raise UnregulatableError('cutoff must be int or timedelta!')
now = time.time()
cutoff = now - delta
logger.debug(
'Remove files that are %s old. Date: %s',
humanfriendly.format_timespan(delta),
datetime.fromtimestamp(cutoff)
)
contents = directory.contents
for f in contents:
if os.stat(f).st_mtime < cutoff:
os.remove(f)
def send_email(directory):
""" sends an email if directory test returns true
"""
raise Exception('Not Implemented')
def do_nothing(directory):
""" Does nothing
"""
pass
# Config lookups
default_tests = {
'max_size' : max_size,
'max_count' : max_count,
'is_after': is_after,
'is_day_of_week': is_day_of_week,
'always' : always,
'never' : never
}
default_solutions = {
'remove_old' : remove_old,
'send_email' : send_email,
'remove_older_than' : remove_older_than,
'do_nothing' : do_nothing
}
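
# Hedged sketch of a ~/.direg.py configuration consuming the lookups above
# (paths and thresholds are hypothetical; the keys mirror what the tests and
# solutions read from directory.spec):
#
#     directories = [
#         {'path': '~/Downloads',
#          'test': 'max_size', 'max_size': '2 GB',
#          'solution': 'remove_old'},
#         {'path': '/var/log/myapp/*',
#          'test': 'is_day_of_week', 'dow': (0, 6),
#          'solution': 'remove_older_than', 'cutoff': 30 * 24 * 3600},
#     ]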
# Model
class UnregulatableError(Exception):
pass
class DiregDirectory(object):
""" Represents a directory
"""
_tester = None
_solution = None
def __init__(self, path, spec):
self.path = path
self.spec = spec
try:
self.tester = spec.get('test', default_tests['max_size'])
self.solution = spec.get('solution', default_solutions['remove_old'])
except (KeyError, TypeError):
raise UnregulatableError
@property
def tester(self):
"""Test strategy should return tru if action required"""
return self._tester
@tester.setter
def tester(self, value):
"""Sets test strategy
        loads predefined strategy from string or checks that value is callable
"""
if isinstance(value, str):
try:
value = default_tests[value]
except KeyError:
logger.error('Invalid test "%s"!', value)
raise
if not callable(value):
logger.error('Test is not callable: %s', value)
raise TypeError
logger.debug('Setting "%s" test to "%s"', self.path, value.__name__)
self._tester = value
@property
def solution(self):
"""Solution strategy"""
return self._solution
@solution.setter
def solution(self, value):
"""Sets solution strategy
        loads predefined strategy from string or checks that value is callable
"""
if isinstance(value, str):
try:
value = default_solutions[value]
except KeyError:
logger.error('Invalid solution "%s"!', value)
raise
if not callable(value):
logger.error('Solution is not callable: %s', value)
raise TypeError
logger.debug('Setting "%s" solution to "%s"', self.path, value.__name__)
self._solution = value
@property
def size(self):
"""Size of the files in the directory"""
logger.debug('Calculating size: %s', self.path)
total = 0
for dirpath, dirnames, filenames in os.walk(self.path):
for f in filenames:
fp = os.path.join(dirpath, f)
total += os.path.getsize(fp)
return total
@property
def contents(self):
"""Contents of the directory"""
return self.get_contents()
def get_contents(self, reverseOrder = True):
"""gets contents of directory, reverses sort order based on mtime
"""
logger.debug('Getting Contents: %s', self.path)
return sorted((os.path.join(dirname, filename) for dirname, dirnames,
filenames in os.walk(self.path) for filename in filenames),
key=lambda fn: os.stat(fn).st_mtime,
reverse = reverseOrder)
def test(self):
"""Invokes the test strategy"""
logger.debug('Testing %s', self.path)
return self.tester(self)
def solve(self):
"""Invokes the solution strategy"""
logger.debug('Solving %s', self.path)
return self.solution(self)
def regulate(self):
"""Regulates the directory"""
logger.info('Regulating directory: "%s", test:"%s", solution:"%s"',
self.path,
self.tester.__name__,
self.solution.__name__)
if self.test():
logger.info('Action Required: %s', self.path)
self.solve()
else:
logger.info('No action required: %s', self.path)
# Application
def regulate(directories) :
"""Regulates directories based on task list
"""
logger.debug('Regulating %s directories', len(directories))
for directory in directories :
try:
directory.regulate()
except UnregulatableError, e:
logger.error(e.message)
def load(directories):
"""Loads directory tasks based on specifications
"""
tasks = []
for spec in directories:
try:
logger.debug('Processing specification for: %s', spec['path'])
paths = glob.glob(os.path.expanduser(spec['path']))
if not paths:
raise UnregulatableError('no paths found in glob')
for path in paths:
if not os.path.isdir(path):
logger.error('%s is not a directory', path)
continue
if not os.access(path, os.R_OK):
logger.error('%s is not readable', path)
continue
if not os.access(path, os.W_OK):
logger.warning('%s is not writable', path)
logger.debug('adding %s to tasks', path)
tasks.append(DiregDirectory(path, spec))
except KeyError:
            logger.error('Directory specification is missing the "path" component')
except UnregulatableError:
            # use spec['path'] here: the loop variable ``path`` may be unbound
            # when the glob itself raised
            logger.error('Configuration problem! Skipping "%s"', spec['path'])
return tasks
def config_logger(args):
""" configures logging
"""
lvl = logging.ERROR - (10 * args['-v'])
logger.setLevel(lvl)
if args['--log']:
handle = logging.FileHandler(args['--log'])
else:
handle = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handle.setLevel(lvl)
handle.setFormatter(formatter)
logger.addHandler(handle)
# Interface
if __name__ == '__main__':
args = docopt(__doc__, version=__version__)
config_logger(args)
config = {}
try:
configfile = os.path.expanduser(args['--config'])
logger.debug('Loading config file: %s', configfile)
execfile(configfile, config)
logger.debug('Configuration: %s', config['directories'])
except IOError:
logger.critical('Cannot read config: %s', configfile)
sys.exit("Cannot read configuration file")
regulate(load(config['directories']))
|
UTF-8
|
Python
| false | false | 2,014 |
3,504,693,324,755 |
ed47784e78d621de78383eed24fad28e8d952376
|
9df05c8e6380ae0365e3effc9004d1e2b729079f
|
/posttrack.py
|
0ee10844a4ed761d35a1d5490da7062b704b1716
|
[] |
no_license
|
striker2000/misc
|
https://github.com/striker2000/misc
|
14fe190d29f5459da4a990773afc74a76c44baf7
|
5466b6a174f0dab92ca5fd13dc693234b6bb396c
|
refs/heads/master
| 2016-09-10T17:35:39.835003 | 2014-06-19T16:16:52 | 2014-06-19T16:16:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
from prettytable import PrettyTable
from suds.client import Client
import argparse
import re
import sys
def barcode_type(s):
s = s.upper()
if not re.match('^[A-Z]{2}\d{9}[A-Z]{2}$', s) and not re.match('^\d{14}$', s):
raise argparse.ArgumentTypeError('wrong barcode')
return s
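
# Examples of barcodes the patterns above accept (values are hypothetical):
#   RA123456789RU  - S10 international format: 2 letters, 9 digits, 2 letters
#   12345678901234 - 14-digit domestic format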
parser = argparse.ArgumentParser(description='Show tracking info from Russian Post service.')
parser.add_argument('barcode', type=barcode_type, help='item barcode')
args = parser.parse_args()
try:
client = Client('http://voh.russianpost.ru:8080/niips-operationhistory-web/OperationHistory?wsdl')
history = client.service.GetOperationHistory(Barcode=args.barcode, MessageType=0)
except Exception as e:
sys.exit(e)
table = PrettyTable(['Date', 'Operation', 'Address', 'Weight'])
for row in history:
date = operation = address = weight = ''
try:
date = row.OperationParameters.OperDate
except:
pass
try:
operation = row.OperationParameters.OperType.Name
operation += ' (' + row.OperationParameters.OperAttr.Name + ')'
except:
pass
try:
address = row.AddressParameters.OperationAddress.Description
address = row.AddressParameters.OperationAddress.Index + ' ' + address
except:
pass
try:
weight = row.ItemParameters.Mass
weight /= 1000.0
except:
pass
table.add_row([
date,
operation,
address,
weight,
])
if hasattr(table, 'align'):
table.align = 'l'
else:
for field in table.fields:
table.set_field_align(field, 'l')
print table.get_string()
|
UTF-8
|
Python
| false | false | 2,014 |
5,634,997,096,525 |
70b6500d8edb25101b90bf5586fb8393814fbd4e
|
3591a689e02e35ee169d6a56cea87967fc723741
|
/apps/books/utils.py
|
2edf2290f276acb89547610ef61152cf6cc3f1c4
|
[] |
no_license
|
andreyfedoseev/djangourls.com
|
https://github.com/andreyfedoseev/djangourls.com
|
3aff95cdd6ba8d1e898af7800ceb5f4944bf9628
|
6b04f4db64802c1c38aa0bf1e5fcd4409e87ae99
|
refs/heads/master
| 2020-05-30T10:39:26.906987 | 2014-11-30T16:45:46 | 2014-11-30T16:45:46 | 794,714 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from books.models import BookSearch
from books.settings import AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY, AMAZON_LOCALE
from django.core.cache import cache
from django.utils.hashcompat import md5_constructor
import amazonproduct
import urllib
def get_books():
if not AMAZON_ACCESS_KEY or not AMAZON_SECRET_KEY:
return []
api = amazonproduct.API(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY, AMAZON_LOCALE)
books = {}
for search in BookSearch.objects.all():
cache_key = 'books.get_books.%s' % md5_constructor(search.__repr__()).hexdigest()
result = cache.get(cache_key, None)
if result is None:
result = {}
kw = {'ResponseGroup': 'Medium'}
kw['Keywords'] = search.keywords
if search.associate_tag:
kw['AssociateTag'] = search.associate_tag
if search.browse_node:
kw['BrowseNode'] = str(search.browse_node)
for item in api.item_search('Books', **kw).Items.Item:
try:
data = get_book_data(item)
except:
continue
result[data['url']] = data
cache.set(cache_key, result, 3600 * 24)
books.update(result)
return list(books.values())
def get_book_data(item):
data = {}
if item.ItemAttributes.ProductGroup != 'Book':
raise Exception("This amazon ID does not refer to book product.")
data['url'] = urllib.unquote(unicode(item.DetailPageURL))
data['title'] = unicode(item.ItemAttributes.Title)
data['description'] = u''
if hasattr(item, "EditorialReviews"):
for r in item.EditorialReviews.EditorialReview:
if r.Source == 'Product Description':
data['description'] = unicode(r.Content)
break
data['author'] = unicode(getattr(item.ItemAttributes, "Author", u''))
data['small_image_url'] = unicode(item.SmallImage.URL)
data['medium_image_url'] = unicode(item.MediumImage.URL)
data['large_image_url'] = unicode(item.LargeImage.URL)
return data
|
UTF-8
|
Python
| false | false | 2,014 |
8,701,603,788,995 |
209e0898d8a8ceca30ae5cbd8e97bab9d45f6907
|
f9b60ab3efeb07f7fbabbbc33025a16b43cf2702
|
/eventsocket/subscribers/crud.py
|
e43b8103e40f87e58bb1f2d3693a15589c1f51eb
|
[] |
no_license
|
zbyte64/django-hyperadmin-eventsocket
|
https://github.com/zbyte64/django-hyperadmin-eventsocket
|
aa39296b10bc18c36127bf78c5e3e87a20b89bdf
|
220fe64c9a74d5276ea7e9e70295d8565c62f577
|
refs/heads/master
| 2020-06-05T10:12:30.599692 | 2013-04-04T21:55:50 | 2013-04-04T21:55:50 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from eventsocket.subscribers.base import Subscriber
class CRUDSubscriber(Subscriber):
    # TODO: split this into separate Create, Update and Delete subscribers
pass
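
# Hedged sketch of the split suggested above (class names are hypothetical):
#
#     class CreateSubscriber(Subscriber): pass
#     class UpdateSubscriber(Subscriber): pass
#     class DeleteSubscriber(Subscriber): pass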
|
UTF-8
|
Python
| false | false | 2,013 |
13,769,665,154,202 |
e9c12c8cd9a129d9e5b69bf643fa387c4e908f3a
|
bf4de3555aac5b86b18c1ec5f745a1f583e382d4
|
/visualizer/procMapGeneratorPrototype/procmap.py
|
7250a5c94eeb423ca8cb87cce2d9d84204ae8be1
|
[] |
no_license
|
siggame/MegaMinerAI-7
|
https://github.com/siggame/MegaMinerAI-7
|
3b6d4c7093127442c075a812c3217ed0cb4f31b8
|
fd2f3da2d0416ff080012e8c38a9a037c4e09f69
|
refs/heads/master
| 2020-04-06T07:12:58.401152 | 2012-10-12T19:06:09 | 2012-10-12T19:06:09 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from PIL import Image, ImageOps, ImageFilter
import math
import sys
f = open( sys.argv[1], 'r' )
lines = [];
for line in f:
lines.append(line);
sz = 1000
depthModifier = .25
perlinModifier = 0.25
surroundings = 5
numIterations = 1
textures = [
[ Image.open( "deepWater.jpg" ).load(), -256 ],
[ Image.open( "midWater.jpg" ).load(), -40 ],
[ Image.open( "shallowWater.jpg" ).load(), 50 ],
[ Image.open( "beach.jpg" ).load(), 80 ],
[ Image.open( "grass.jpg" ).load(), 110 ],
[ Image.open( "tundra.jpg" ).load(), 512 ]
]
distanceMap = []  # one row is appended per y below; seeding with [[]] left a stray empty row
for i in range( len(lines) ):
lines[i] = lines[i].replace( ' ', '' )
for y in range( len(lines) ):
distanceMap.append([]);
for x in range( len(lines) ):
land = 1
if lines[x][y] == '.':
land = 0
dist = 0
for reach in range( 1, len(lines), 1 ):
for i in range( -reach, reach+1 ):
if x+i < 0 or x+i >= len(lines):
continue
for j in range( -reach, reach+1):
if y+j < 0 or y+j >= len(lines):
continue
if land:
if lines[x+i][y+j] == '.':
dist = reach
break
else:
if lines[x+i][y+j] != '.':
dist = reach
break
if dist > 0:
break
if dist > 0:
break
distanceMap[y].append( dist )
orgSize = (len(lines),len(lines))
size = (sz,sz)
originalMap = Image.new( "I", size )
oData = originalMap.load()
for y in range( size[1] ):
oY = int(math.floor( y*orgSize[1]/size[1] ))
for x in range( size[0] ):
oX = int(math.floor( x*orgSize[0]/size[0] ))
if( lines[oX][oY] == '.' ):
oData[ y,x ] = -20 * distanceMap[oY][oX];
else:
oData[ y,x ] = 20 * distanceMap[oY][oX];
def surroundingValues( texture, x, y ):
depthVal = 0
for i in range( -surroundings, surroundings+1 ):
if x+i >= size[0] or x+i < 0:
continue
for j in range( -surroundings, surroundings+1 ):
if y+j >= size[1] or y+j < 0:
continue
if i != 0 or j != 0 :
depthVal += texture[x+i,y+j]
return depthVal
biggest = 0
smallest = 0
def getPositive( texture ):
tData = texture.load()
rTexture = Image.new( "L", size )
rData = rTexture.load()
for x in range( size[0] ):
for y in range( size[1] ):
if( tData[x,y] > 0 ):
rData[x,y] = tData[x,y]
else:
rData[x,y] = 0
return rTexture
def getNegative( texture ):
tData = texture.load()
rTexture = Image.new( "L", size )
rData = rTexture.load()
for x in range( size[0] ):
for y in range( size[1] ):
if( tData[x,y] < 0 ):
rData[x,y] = tData[x,y] *-1
else:
rData[x,y] = 0
return rTexture
for i in range( numIterations ):
newMap = originalMap.copy()
nData = newMap.load()
oData = originalMap.load()
biggest = smallest = 0
for x in range( size[0] ):
print "{0}/{1}".format( x, size[0] )
for y in range( size[1] ):
nData[ x,y ] = surroundingValues( oData, x, y ) * depthModifier
if nData[ x,y ] > biggest:
biggest = nData[ x,y ]
if nData[ x,y ] < smallest:
smallest = nData[x,y]
originalMap = newMap.copy();
finalMap = Image.new( "RGB", size )
def normalize( element, small, big, lower, upper ):
return float(element - small)*float(upper-lower)/float(big-small)+lower;
def sigmoidFunc( value ):
    # The early return below currently bypasses the logistic curve on the
    # following line; remove it to re-enable smooth interpolation.
    return value
    return 1/(1+math.exp( -.333 * value ) )
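
# Worked example for the (currently disabled) logistic branch above:
# sigmoidFunc(0) would give 0.5 and sigmoidFunc(3) about 0.73, since
# 1/(1+exp(-0.333*3)) = 0.731..., squashing weights into (0, 1).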
def interp( depth, x, y ):
baseTex = -1;
baseDepth = textures[0][1] - 5000
while depth >= baseDepth and baseTex+1 < len(textures):
baseTex+=1
baseDepth = textures[baseTex][1]
nextTex = baseTex
baseTex -= 1
baseDepth = textures[baseTex][1]
nextDepth = textures[nextTex][1]
percentInterp = sigmoidFunc( (depth-baseDepth)/(nextDepth-baseDepth ) )
c1 = textures[baseTex][0][ x,y ]
c2 = textures[nextTex][0][ x,y ]
color1 = [c1[0],c1[1],c1[2]]
color2 = [(c2[0]-c1[0])*percentInterp,(c2[1]-c1[1])*percentInterp,(c2[2]-c1[2])*percentInterp]
color3 = (color1[0]+color2[0],color1[1]+color2[1], color1[2]+color2[2])
return color3;
fData = finalMap.load()
oData = originalMap.load()
for x in range( 0, size[0] ):
for y in range( 0, size[1] ):
fData[x,y] = interp( normalize( oData[x,y], smallest, biggest, -256, 256 ), x, y )
finalMap.show()
|
UTF-8
|
Python
| false | false | 2,012 |
16,947,940,977,680 |
39abd5cf7ddec857a0033a778163c99ff219e8e7
|
52c74b4cd7be186245c3ed046b459a8058c9e062
|
/chukchi/email.py
|
989ac29613f1ddd3f5ece7bc8355498280264fd7
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"metamail",
"LicenseRef-scancode-other-permissive",
"Beerware",
"LicenseRef-scancode-zeusbench",
"Apache-1.1",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-pcre",
"Spencer-94",
"LicenseRef-scancode-public-domain",
"NTP",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant",
"RSA-MD"
] |
non_permissive
|
hades/chukchi
|
https://github.com/hades/chukchi
|
55f612d8872ebf5babfdaa376a80f320df1f4a24
|
fd76f152ec28ee692a83d83b22bcb8997ee1bf99
|
refs/heads/master
| 2016-09-16T11:47:32.648326 | 2013-11-18T14:04:19 | 2013-11-18T14:04:19 | 10,243,178 | 0 | 1 | null | false | 2013-07-23T18:31:00 | 2013-05-23T12:25:16 | 2013-07-23T18:23:07 | 2013-07-23T17:29:26 | 388 | null | 1 | 11 |
Python
| null | null |
# This file is part of Chukchi, the free web-based RSS aggregator
#
# Copyright (C) 2013 Edward Toroshchin <chukchi-project@hades.name>
#
# Chukchi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Chukchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# Please see the file COPYING in the root directory of this project.
# If you are unable to locate this file, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
import smtplib
from email.mime.text import MIMEText
from .config import config
LOG = logging.getLogger(__name__)
def send_email(from_, to, subject, text):
to_email = None
if isinstance(to, (str, unicode)):
to_email = to
elif hasattr(to, 'email'):
to_email = to.email
if isinstance(to_email, unicode):
to_email = to_email.encode('utf-8')
if isinstance(subject, unicode):
subject = subject.encode('utf-8')
if not to_email:
LOG.error("no address specified for recipient %s", to)
return
msg = MIMEText(text)
msg.set_charset('utf-8')
msg['Subject'] = subject
msg['From'] = from_
msg['To'] = to_email
try:
s = smtplib.SMTP(config.EMAIL_HOST)
s.sendmail(from_, [to_email], msg.as_string())
s.quit()
return True
except smtplib.SMTPException:
LOG.exception("error sending email to %s", to)
return False
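
# Hedged usage sketch (addresses are hypothetical; config.EMAIL_HOST must
# point at a reachable SMTP server):
#
#     send_email('chukchi@example.org', 'user@example.org',
#                u'Chukchi notification', u'Your feeds have been updated.')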
# vi: sw=4:ts=4:et
|
UTF-8
|
Python
| false | false | 2,013 |
13,159,779,809,130 |
aa17745ad07ec3e3fdf1de326cb040c4bb01e4e6
|
eab8603ee9d90e46eb4a8be1103762e365d8c684
|
/test_parse_tedxvid.py
|
cb72330bd3be8c87d67f2b56920d543190e46459
|
[] |
no_license
|
amitch/ytube-tedx
|
https://github.com/amitch/ytube-tedx
|
eaa71658cb5a3dc72e27af952764551369d55399
|
857a87a606501e63d4022e551d71638ccbbdfa59
|
refs/heads/master
| 2020-09-13T09:25:07.042704 | 2012-01-08T19:26:14 | 2012-01-08T19:26:14 | 3,035,867 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Class to test, parse_tedxvid.py, parse TEDx Videos from youtube
# Copyright, amitch@rajgad.com, 2011
import unittest
from parse_tedxvid import *
class TestParseTedXVid(unittest.TestCase):
def setUp(self):
self.video_list1 = [
"TEDxSeoul - Dr. Kim and Jung - 11/28/09,16947,https://www.youtube.com/watch?v=un4qbATrmx8&feature=youtube_gdata_player",
"TEDxCMU - Chris Guillebeau - Fear and Permission,16867,https://www.youtube.com/watch?v=unxL5RRhNb0&feature=youtube_gdata_player",
"TEDxCMU - Daniel Simons - Seeing The World As It Isn't,16781,https://www.youtube.com/watch?v=9Il_D3Xt9W0&feature=youtube_gdata_player",
"TEDxTokyo - Renee Byer - 5/22/09,16758,https://www.youtube.com/watch?v=Z3CDOS4GNdQ&feature=youtube_gdata_player",
"TEDxSF- Callie Curry aka Swoon,16622,https://www.youtube.com/watch?v=5298KZuW_JE&feature=youtube_gdata_player",
"TEDxVancouver - Nazanin Afshin-Jam - Voice for the Voiceless,16545,https://www.youtube.com/watch?v=soqtTCeczbM&feature=youtube_gdata_player",
"TEDxSF - Oana Pellea,16348,https://www.youtube.com/watch?v=OZdFErDoU3U&feature=youtube_gdata_player",
"TEDxVancouver - Jeet Kei Leung - Transformational Festivals,16335,https://www.youtube.com/watch?v=Q8tDpQp6m0A&feature=youtube_gdata_player",
"TEDxSF 2011 - Wade Adams - Nanotechnology and Energy,16270,https://www.youtube.com/watch?v=1GFst2IQBEM&feature=youtube_gdata_player",
"TEDxSF - Grow a New Eye,16153,https://www.youtube.com/watch?v=T-ldzLSFxds&feature=youtube_gdata_player"
]
# Test for single top conference
def test_parsevid1(self):
parsevid = ParseTEDxVideos()
topconf = parsevid.gettopconf(1, self.video_list1)
self.assertEqual(topconf[0].split(",")[0], "TEDxSF")
# Test for multiple top conference
def test_parsevid_top3(self):
parsevid = ParseTEDxVideos()
topconf_details = parsevid.gettopconf(3, self.video_list1)
topconf =[]
for topconf_detail in topconf_details:
topconf.append(topconf_detail.split(",")[0])
# If using Py 2.7
#self.assertIn("TEDxSF", topconf)
self.assertTrue("TEDxSF" in topconf)
self.assertTrue("TEDxVancouver" in topconf)
self.assertTrue("TEDxCMU" in topconf)
self.assertFalse("TEDxTokyo" in topconf)
if __name__ == '__main__':
unittest.main()
|
UTF-8
|
Python
| false | false | 2,012 |
1,932,735,286,799 |
2468e629b878a1dadcfcffd51ec76a17d965daa7
|
8d10d24c96a7c4991c9029e91c13282b8428aba2
|
/maps/views.py
|
a189e8315c475f9f1f73705dae9bd09da66ee19a
|
[] |
no_license
|
tyrocca/cs50_final_project
|
https://github.com/tyrocca/cs50_final_project
|
46cfadd0adb221bedc9b5c7b63839aa65cce7ef0
|
07561112001b92dfdb9d85d4cef0a1b88565e0ae
|
refs/heads/master
| 2021-03-19T16:21:47.334027 | 2014-12-07T16:38:15 | 2014-12-07T16:38:15 | 26,443,649 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
import simplejson as json
def nation_map(request):
from polls.models import PresidentElection
# get the models from the data base and order alphabetically
results = PresidentElection.objects.all().order_by("state")
return render(request, 'map_base.html')
def dem_rank(request):
from polls.models import PresidentElection
    dem_vals = {state: pct for state, pct in list(PresidentElection.objects.values_list("state", "dem_pct"))}
return HttpResponse(json.dumps(dem_vals,use_decimal=True), mimetype='application/json')
def rep_rank(request):
from polls.models import PresidentElection
    rep_vals = {state: pct for state, pct in list(PresidentElection.objects.values_list("state", "rep_pct"))}
return HttpResponse(json.dumps(rep_vals,use_decimal=True), mimetype='application/json')
def state_data(request, state_abbrev):
from polls.models import PresidentElection
rets = PresidentElection.objects.filter(state=state_abbrev)[0]
new = {"data": [rets.dem_pct, rets.dem_num, rets.rep_pct, rets.rep_num, rets.other_pct]}
return HttpResponse(json.dumps(new,use_decimal=True), mimetype='application/json')
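
# Hedged URLconf sketch for the views above (URL patterns are assumptions;
# only the view and parameter names come from this module):
#
#     url(r'^$', 'maps.views.nation_map'),
#     url(r'^dem_rank/$', 'maps.views.dem_rank'),
#     url(r'^rep_rank/$', 'maps.views.rep_rank'),
#     url(r'^state/(?P<state_abbrev>\w+)/$', 'maps.views.state_data'),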
|
UTF-8
|
Python
| false | false | 2,014 |
2,911,987,868,233 |
924f2cdcc3ed74c14388977dd13db38f06cf752a
|
b83bd2709729ac2eb65026ba6e377d619371369a
|
/game/role.py
|
db0a66c66c4e04251b044a8a1b43b82c2549f662
|
[] |
no_license
|
main1015/pygame-demo
|
https://github.com/main1015/pygame-demo
|
cbc36a1066497346a14c016c5904106740ef0aac
|
a6bebfdc7f21ec675f29155f039d410431785050
|
refs/heads/master
| 2021-01-22T05:05:20.932704 | 2013-07-04T09:37:05 | 2013-07-04T09:37:05 | 11,117,996 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import pygame
# pygame sprites need a pygame.Rect; the original imported wx.Rect here,
# which pygame's sprite/group machinery cannot draw with
from pygame import Rect
from gameobjects.vector2 import Vector2
from game.util import load_character
__author__ = 'Administrator'
class RoleIcon(pygame.sprite.Sprite):
_rate = 100
_width = 100
_height = 120
_X_STEP = 80
_Y_STEP = 0
X_STEP = _X_STEP
Y_STEP = _Y_STEP
_w_number = 8
_h_number = 4
isOver = False
move_step = 1
step = 0
images = []
MAX = 10
def __init__(self,filename):
self.order = 0
pygame.sprite.Sprite.__init__(self)
if len(self.images) == 0:
self.images = load_character(filename, self._width,self._height,self._w_number,self._h_number)
self.image = self.images[self.order]
self.rect = Rect(0, 0, self._width, self._height)
# self.life = self._life
self.passed_time = 0
self.rect.left = 0
self.rect.top = 0
self.count = 1
def _isRun(self):
self.count += 1
if self.count > self.MAX:
self.count = 1
return True
else:
return False
def update(self,current_time):
self.move()
# if self.passed_time < current_time:
# self.step += 1
# if self.step == self._w_number:
# self.step = 0
# self.order = self.step
# self.image = self.images[self.order]
# self.rect.left,self.rect.top = pygame.mouse.get_pos()
# self.move()
def set_move(self,form_to):
self.form_to = form_to
x,y = self.form_to
start_point = Vector2(self.rect.left,self.rect.top)
end_point = Vector2(x,y)
point = end_point - start_point
if point:
self.isOver = True
self.move_step = self._get_step(point[0])
def move(self):
if self._isRun():
self.step += 1
if self.step == self._w_number:
self.step = 0
self.order = self.step
self.image = self.images[self.order]
if self.isOver:
self._move()
    def _move(self):
        x, y = self.form_to
        start_point = Vector2(self.rect.left, self.rect.top)
        end_point = Vector2(x, y)
        point = end_point - start_point  # vector still left to travel
        # spread the remaining vertical distance over the remaining steps
        self.Y_STEP = (point[1] // self.move_step) if self.move_step else 0
        # flip the step signs so that each step heads towards the target
        if point[0] * self.X_STEP < 0:
            self.X_STEP = -self.X_STEP
        if point[1] * self.Y_STEP < 0:
            self.Y_STEP = -self.Y_STEP
        self.move_step = self.move_step - 1 if self.move_step - 1 >= 1 else 1
        if self.move_step == 1:
            # final step: snap onto the target and restore the default steps
            self.isOver = False
            self.X_STEP = self._X_STEP
            self.Y_STEP = self._Y_STEP
            self.rect.left = x
            self.rect.top = y
        else:
            self.rect.left += self.X_STEP
            self.rect.top += self.Y_STEP
        print '~' * 20, self.rect.left, self.rect.top, self.X_STEP, self.Y_STEP, self.move_step, x, y
def _get_step(self,distance):
steps = abs(distance // self.X_STEP)
if distance % self.X_STEP !=0:
steps += 1
return steps
|
UTF-8
|
Python
| false | false | 2,013 |
3,985,729,672,062 |
8ead33d6bb0dcd0b32ed134e84bec5b9b0e6930c
|
27919a0e5f3bc9971272a8f3119fc4d7d7e2d30c
|
/src/sample/sphinx/wscript
|
3c2cbe8ac6da681d525cf03bfaba9bd646e72943
|
[] |
no_license
|
kskels/ubi
|
https://github.com/kskels/ubi
|
b12fd94f700006e7befaa78d889bc9b2f01c3712
|
b252470b99aa971c266ba740b9a0ae694a15372e
|
refs/heads/master
| 2021-01-04T02:37:24.096235 | 2012-04-22T16:02:38 | 2012-04-22T16:02:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#! /usr/bin/env python
# encoding: utf-8
APPNAME = 'ubi'
VERSION = '0.1'
out = 'bin'
def options(opt):
opt.load('compiler_cxx')
# sphinx libraries, library paths and include paths; kept at module level
# so that both configure() and build() can see them
lib_sphinx = ['pocketsphinx', 'sphinxbase', 'sphinxad', 'boost_thread']
libpath_sphinx = ['/opt/sphinx/base/0.7/lib', '/opt/sphinx/pocket/0.7/lib']
include_sphinx = [
    '/opt/sphinx/pocket/0.7/include/pocketsphinx',
    '/opt/sphinx/pocket/0.7/include',
    '/opt/sphinx/base/0.7/include/sphinxbase',
    '/opt/sphinx/base/0.7/include'
]
def configure(ctx):
    ctx.load('compiler_cxx')
def build(ctx):
    ctx.program(
        source='main.cpp',
        target='ubi',
        includes=include_sphinx,
        libpath=libpath_sphinx,
        lib=lib_sphinx
    )
def dist(ctx):
ctx.algo = 'tar.gz'
|
UTF-8
|
Python
| false | false | 2,012 |
15,118,284,910,483 |
97770b6b36dcf941618fdf76722bcf0fffbc0a89
|
6e8a0c03173f68b09064b5d5428f064deaa3f8ee
|
/logging2.py
|
022c7a42a5c0db24ac837cc777f5ed4e298b81d2
|
[] |
no_license
|
dx3759/getwebhtmls
|
https://github.com/dx3759/getwebhtmls
|
7c8df122390944fad7016b121a963ccef154015f
|
31859d5c11d19cfbbd780e14dfc304f87ab5d647
|
refs/heads/master
| 2016-09-03T07:15:09.158173 | 2012-11-04T00:34:20 | 2012-11-04T00:34:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
#-*- coding:utf-8 -*-
import logging
import threading
import Queue
#logging.basicConfig(level = logging.DEBUG,
# format = '%(asctime)s %(levelname)-8s %(message)s',
# datefmt = '%a,%d %b %Y %H:%M:%S',
# filename = '/tmp/myapp.log',
# filemode = 'w')
class logging2(threading.Thread):
"""定义一个写日志的线程,调用logging写入文件"""
A_Queue = Queue.Queue() #用来存放 日志队列
#logging.basicConfig(level = logging.DEBUG,format = '%(asctime)s %(levelname)-8s %(message)s',datefmt = '%a,%d %b %Y %H:%M:%S',filename = '/tmp/myapp.log',filemode = 'w')
def __init__(self,levels,files):
threading.Thread.__init__(self)
self.name = 'logging2'
logging.basicConfig(level = levels,format = '%(asctime)s %(levelname)-8s %(message)s',datefmt = '%a,%d %b %Y %H:%M:%S',filename = files,filemode = 'w')
def run(self):
while 1:
data = logging2.A_Queue.get()
#print 'data',data
loglevel = data.keys()[0]
content = data.values()[0]
getattr(logging,loglevel)(content)
def debug(content):
logging2.A_Queue.put({'debug':content})
def info(content):
logging2.A_Queue.put({'info':content})
def warning(content):
logging2.A_Queue.put({'warning':content})
def error(content):
logging2.A_Queue.put({'error':content})
def critical(content):
logging2.A_Queue.put({'critical':content})
def init(levels,files):
"""开始写日志线程"""
if levels == 1:
levels = logging.DEBUG
elif levels == 2:
levels = logging.INFO
elif levels == 3:
levels = logging.WARNING
elif levels == 4:
levels = logging.ERROR
elif levels == 5:
levels = logging.CRITICAL
#logging.basicConfig(level = levels,format = '%(asctime)s %(levelname)-8s %(message)s',datefmt = '%a,%d %b %Y %H:%M:%S',filename = files,filemode = 'w')
cc = logging2(levels,files)
cc.setDaemon(True)
cc.start()
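if __name__ == '__main__':
    # a minimal usage sketch: start the daemon logging thread at DEBUG level,
    # queue one record, and give the thread a moment to drain the queue before
    # the interpreter exits; the log path here is illustrative only
    import time
    init(1, '/tmp/logging2_demo.log')
    info('logging thread started')
    time.sleep(0.5)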
|
UTF-8
|
Python
| false | false | 2,012 |
19,164,144,107,243 |
a35bde520a090498a2d70387ba736457216c8c4d
|
82a172b54deae818f6294fc925d389bf02443931
|
/pacs/test/test_nl_rls.py
|
e961c8942abc2671dd7deadfba01755cd6916942
|
[] |
no_license
|
pchanial/tamasis-pacs
|
https://github.com/pchanial/tamasis-pacs
|
be64bf0ad1bf79dd2c95aa14c82f2b1e81c1acdd
|
229dafd3a529e368a8d2fb41e2a678c6a479e83e
|
refs/heads/master
| 2016-09-10T17:33:22.213677 | 2013-05-15T08:29:00 | 2013-05-15T08:29:00 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
import tamasis
import pyoperators
from pyoperators import DiagonalOperator, MaskOperator
from tamasis import PacsObservation, mapper_nl
from tamasis.linalg import norm2, norm2_ellipsoid
class TestFailure(Exception): pass
pyoperators.memory.verbose = False
tamasis.var.verbose = False
profile = None  # 'test_rls.png'
data_dir = os.path.dirname(__file__) + '/data/'
obs = PacsObservation(filename=data_dir+'frames_blue.fits',
fine_sampling_factor=1)
obs.pointing.chop[:] = 0
tod = obs.get_tod()
projection = obs.get_projection_operator(resolution=3.2, downsampling=True,
npixels_per_sample=6)
masking_tod = MaskOperator(tod.mask)
model = masking_tod * projection
# iterative map, taking all map pixels
class Callback():
def __init__(self):
self.niterations = 0
def __call__(self, x):
self.niterations += 1
invntt = DiagonalOperator(1/obs.get_detector_stddev(100)**2,
broadcast='rightward')
def test():
map_nl = mapper_nl(tod, model, hypers=2*[1.],
norms=[norm2_ellipsoid(invntt)] + 2*[norm2],
tol=1.e-4, maxiter=1000,
callback=None if tamasis.var.verbose else Callback(),
)
print 'Elapsed time: ' + str(map_nl.header['TIME']) + ' after ' + \
str(map_nl.header['NITER']) + ' iterations.'
if map_nl.header['NITER'] > 150:
raise TestFailure()
if __name__ == '__main__':
test()
|
UTF-8
|
Python
| false | false | 2,013 |
14,998,025,823,746 |
3e5ff3778168ecd802b716fb6a82854960e50cf6
|
aee2b8e2598cccc37a18f9feab41f20e8fc5a5aa
|
/20111016-ultimate-foomash/ultimate_foomash/foomash/models.py
|
e1407a67e401180ad319eb44b8953933a06f192c
|
[] |
no_license
|
code-club/code-club
|
https://github.com/code-club/code-club
|
b85c1131e45904cdb3d1e932eab4202e4deddf75
|
e3679e8fec266f8b8c174cd80355d393652e64ae
|
refs/heads/master
| 2020-06-03T13:42:38.292757 | 2011-10-26T21:15:10 | 2011-10-26T21:15:10 | 1,515,476 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
class Foo(models.Model):
name = models.CharField(max_length=200)
photo = models.ImageField(upload_to='photos')
score = models.FloatField(default=0)
added_date = models.DateTimeField(auto_now_add=True)
    category = models.ForeignKey('Category')
def __unicode__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = 'Categories'
|
UTF-8
|
Python
| false | false | 2,011 |
18,983,755,480,509 |
d92118d6a033826f763612a7c5116c30bd3f6133
|
06283ebd66870627aecc230c4aa8b9606ff7c4f5
|
/hitranlbl/dictionaries.py
|
5fb883541a95c3044653047baf8fb9d11749ee3e
|
[] |
no_license
|
xnx/www_hitran
|
https://github.com/xnx/www_hitran
|
c0e26a87134b7e2aac435008dde39ea4713ef633
|
023eb6aa75541b530330d13601be27aceed926de
|
refs/heads/master
| 2015-08-01T23:56:22 | 2013-01-18T12:39:20 | 2013-01-18T12:39:20 | 5,718,406 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from caseless_dict import CaselessDict
returnables_list = [
'XSAMSVersion',
'SchemaLocation',
'SourceID',
'SourceAuthorName',
'SourceTitle',
'SourcePageBegin',
'SourcePageEnd',
'SourceVolume',
'SourceYear',
'SourceCategory',
'SourceComments',
'RadTransID',
'RadTransComments',
'RadTransUpperStateRef',
'RadTransLowerStateRef',
'RadTransWavenumber',
'RadTransWavenumberUnit',
'RadTransWavenumberRef',
'RadTransWavenumberAccuracy',
'RadTransProbabilityA',
'RadTransProbabilityAUnit',
'RadTransProbabilityARef',
'RadTransProbabilityAAccuracy',
'RadTransProbabilityMultipoleValue',
'MoleculeChemicalName',
'MoleculeOrdinaryStructuralFormula',
'MoleculeStoichiometricFormula',
'MoleculeIonCharge',
'MoleculeID',
'MoleculeInchi',
'MoleculeInchiKey',
'MoleculeSpeciesID',
'MoleculeComment',
'MoleculeStructure',
'MoleculeStateID',
'MoleculeStateMolecularSpeciesID',
'MoleculeStateEnergy',
'MoleculeStateEnergyUnit',
'MoleculeStateEnergyOrigin',
'MoleculeStateTotalStatisticalWeight',
'MoleculeStateNuclearSpinIsomer',
'MoleculeStateQuantumNumbers',
'MoleculeQnStateID',
'MoleculeQnCase',
'MoleculeQnLabel',
'MoleculeQnValue',
'MoleculeQnAttribute',
'MoleculeQNElecStateLabel',
'MoleculeQnXML',
'EnvironmentID',
'EnvironmentTemperature',
'EnvironmentTemperatureUnit',
'EnvironmentTotalPressure',
'EnvironmentTotalPressureUnit',
'EnvironmentSpecies',
'EnvironmentSpeciesName',
]
RETURNABLES = {}
for returnable in returnables_list:
RETURNABLES[returnable] = 'dummy'
restrictable_types = CaselessDict({
'MoleculeChemicalName': str,
'MoleculeStoichiometricFormula': str,
'MoleculeInchiKey': str,
'InchiKey': str,
'RadTransWavenumber': float,
'RadTransWavelength': float,
'RadTransProbabilityA': float,
})
RESTRICTABLES = {}
for restrictable in restrictable_types:
RESTRICTABLES[restrictable] = 'dummy'
requestables_list = [
'Environments',
'Molecules',
'RadiativeTransitions',
'RadiativeCrossSections',
]
REQUESTABLES = {}
for requestable in requestables_list:
REQUESTABLES[requestable] = 'dummy'
EXAMPLE_QUERIES = {
"SELECT * WHERE (RadTransWavelength >= 10000.0 AND RadTransWavelength <= 100000.0) AND ((InchiKey IN ('DXHPZXWIPWDXHJ-VQEHIDDOSA-N','DXHPZXWIPWDXHJ-HQMMCQRPSA-N','DXHPZXWIPWDXHJ-UHFFFAOYSA-N')))": '(12C)(32S), (12C)(34S), (12C)(33S) lines between 10000 and 100000 Angstroms',
"SELECT ALL WHERE RadTransWavenumber>6000. AND RadTransWavenumber<6100. AND (MoleculeChemicalName in ('H2O', 'Ammonia') OR MoleculeStoichiometricFormula='HOCl')": 'H2O, NH3 and HOCl lines between 6000 and 6100 cm-1',
}
# the full URL to use these queries is:
# "vamdc.mssl.ucl.ac.uk/node/hitran/tap/sync/?REQUEST=doQuery&LANG=VSS2&FORMAT=XSAMS&QUERY=<query>"
|
UTF-8
|
Python
| false | false | 2,013 |
14,293,651,210,730 |
4d921fac232a9258776e8c7c990bd87fcba8690a
|
195d6a00ed824db8b7417948239f01e40ebbfe8b
|
/Public/generators/generators.py
|
3e368761dee46d3ca17dc14279c7be81af68a27d
|
[] |
no_license
|
bigsnarfdude/generatorsCoroutinesPYDATA
|
https://github.com/bigsnarfdude/generatorsCoroutinesPYDATA
|
a9dc687b89bc7f0aa61de637fc6396242feef581
|
1cd10d92792b9977f4eb4a806a685ab609ebada0
|
refs/heads/master
| 2021-01-10T22:02:08.583490 | 2012-11-27T17:43:47 | 2012-11-27T17:43:47 | 6,887,897 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
from __future__ import division
__title__ = 'Generators and Co-Routines for Stream Data Processing'
__author__ = 'V James Powell <vpowell@gmail.com>'
# ABSTRACT
# this handout discusses the use of generator features in Python
# for data processing using a "stream" metaphor
# it discusses the distinction between generators, iterators, generator
# expressions and coroutines, including syntax and concepts
# it discusses the basic metaphor behind a "stream" processing code flow
# it also discusses optimisation techniques that this allows
# GOALS
# for novices, I'd like to introduce y'all to the concept of
# generators in a very rigorous fashion (and in a fashion that
# most tutorials do not discuss)
# I'd also like to introduce the topic of coroutines
# for advanced users, I'd like to discuss some optimisation proposals
# for problems that are modelled using generators and also discuss
# generators as one of the fundamental semantic pieces
# in Python (i.e., the primacy of __call__ and __iter__)
# also, I'd like to discuss a hard problem (how to write a function
# that is guaranteed not to exhaust a generator)
# SYNTAX
# from a syntax perspective, when we talk about generators in Python, we're
# generally referring to:
# 1. iterator/generator protocol (an interface consisting of .next/__next__
# and, optionally, .send and .throw)
# 2. generator yield-syntax
# 3. generator expression syntax
# SYNTAX // ITERATOR PROTOCOL
# an iterator in Python is any object that defines an .__iter__() method and a
# .next method
# note: in Python3, the .next method is called .__next__
# these two methods are a fundamental part of the Python object data model:
# .__iter__() corresponds to `getiterfunc tp_iter` on PyTypeObject
# .next() corresponds to `iternextfunc tp_iternext` on PyTypeObject
# these two methods also correspond to the __builtin__ functions
# iter() for .__iter__()
# next() for .next()/.__next__()
# trivial iterator implements the most trivial iterator object
# this is an object that represents an infinite sequence of None values
class trivial_iterator(object):
def __iter__(self): return self
def __next__(self): return None
next = __next__
# trivial_iterator is an Iterator type, as it has both a .next and .__iter__()
# method
from collections import Iterator
assert issubclass(trivial_iterator, Iterator)
# we can iterate over it using for-loop syntax
for x in trivial_iterator():
pass
break
# let's modify the example slightly:
# our trivial iterator now has some internal notion of state
# this state is just a counter that counts from 0 up to 5
# once this internal counter hits 5, the iterator raises a StopIteration
# which is how we signal there are no more values
class trivial_iterator(object):
def __init__(self):
self.state = 0
def __iter__(self): return self
def __next__(self):
if self.state == 5:
raise StopIteration()
self.state += 1
return None
next = __next__
# we can still iterate over the above, and it will now terminate after
# yielding 5 None values
for x in trivial_iterator():
pass
# we can also see that we can pass it to any function that iterates internally:
assert set(trivial_iterator()) == set([None])
assert list(trivial_iterator()) == [None, None, None, None, None]
assert tuple(trivial_iterator()) == (None, None, None, None, None)
# SYNTAX // FOR LOOPS
# we can look at our for-loop syntax as actually looking something like this:
for x in trivial_iterator():
pass
# is equivalent to
try:
iterable = iter(trivial_iterator()) # calls our .__iter__() method
while True:
x = next(iterable) # calls our .next()/.__next__() method
pass
except StopIteration:
pass
# SYNTAX // ITERATORS vs ITERABLES
# this suggests to us something very important:
# iterators are iterable objects plus a bit of state to track where
# you are in the iteration
# in other words, a list [1,2,3,4] is an Iterable but not an Iterator
# it's not an Iterator, because there's no place for us to store
# information about where we are in the iteration
# a trivial listiterator would look like:
class listiterator(object):
def __init__(self, list_obj):
self.list_obj = list_obj
self.idx = 0
def __iter__(self):
return self
def __next__(self):
        if self.idx >= len(self.list_obj):  # >= so we stop instead of indexing past the end
raise StopIteration()
rv = self.list_obj[self.idx]
self.idx += 1
return rv
next = __next__
assert next(listiterator([1,2,3])) == 1
# SYNTAX // GENERATORS
# now, let's consider an iterator that takes a function and repeatedly calls
# it to provide values
class generatorlike(object):
def __init__(self, func, state=None):
        self.state = state
self.func = func
def __iter__(self):
return self
def __next__(self):
self.state = self.func(self.state)
return self.state
next = __next__
def count(x):
x = 0 if x is None else x+1
if x == 3: raise StopIteration() # need a stop condition
return x
assert list(generatorlike(count)) == [0,1,2]
# notice that the state of the iterator is being encoded manually in the state
# of the instance itself
# consider that a function in Python creates a stack frame object
from inspect import currentframe, stack
def foo():
print currentframe() # shows the current stack frame for this function
print stack() # shows the stack at this stage
# this stack object is an object like any other
# it has members that represent local variables, that represent the code that
# is being executed, that represent our current execution point
def foo():
print currentframe().f_lasti # last instruction executed
print currentframe().f_locals # locals
print currentframe().f_code # code
# when the function returns, the stack object's references are decremented
# and it is (probably) garbage collected
# now, the state that our generator-like object needs to track
# could easily be encoded in this frame object
# this leads us into the yield-syntax
def count():
yield 0
yield 1
yield 2
assert hasattr(count(),'__iter__')
assert hasattr(count(),'next')
from collections import Iterator, Iterable
assert isinstance(count(),Iterator)
assert isinstance(count(),Iterable)
# we can conceptualise this syntax as creating an object that is just a
# function that is not garbage collected until it runs to exhaustion
# but that can return control at any point
# for example:
from inspect import currentframe
def count():
print 'first', currentframe().f_lasti
yield 0
print 'second', currentframe().f_lasti
yield 1
print 'third', currentframe().f_lasti
yield 2
c = count()
print 'for loop'
for x in c:
print x
# one thing to take note of is the flow of execution
# we need an instance to capture the state of each particular
# iteration, which is why all yield-syntax generators
# must be __call__ed
# that is, the generator definition is akin to a class
# definition, which we have to instantiate to use
# the first iteration starts at the top of the function and runs until
# after it yields the first value
# each subsequent iteration runs from the instruction after the previous
# yield until yielding the next value
# the end of the function raises a StopIteration which is silently
# handled by the for-loop
# one interesting thing to note is the difference between the bytecode
# of a function and a generator
def gen():
yield
def func():
return
from dis import dis
dis(gen)
# 210 0 LOAD_CONST 0 (None)
# 3 YIELD_VALUE
# 4 POP_TOP
# 5 LOAD_CONST 0 (None)
# 8 RETURN_VALUE
dis(func)
# 212 0 LOAD_CONST 0 (None)
# 3 RETURN_VALUE
# a generator is actually identical in bytecode to a function
# with the addition of yielding a value (which returns control and a value
# to the calling function without cleaning up the stackframe)
# note that yield-syntax generators are able to return, but they are not
# allowed to return non-None values (* this restriction has been
# lifted in Python3 to accommodate `yield from` syntax)
def count():
value = yield 1
print 'yielded value:', value
for x in count():
print x
print 'loop end'
# note that the print statement in the above generator is executed
# when attempting to retrieve the next() value in the SECOND iteration
# of the loop
c = count()
print next(c)
try:
print 'try begin'
print next(c)
except StopIteration:
pass
else:
assert False, 'should have raised the StopIteration'
# to send in a non-None value, we can use the .send() method on the generator
# SYNTAX // GENERATOR METHODS: .send(), .throw(), .close()
# a generator created via yield-syntax comes with this and two other methods:
# .send() to retrieve the next value and send a value into the generator
# .throw() to raise an exception at the point of execution
# .close() to raise a GeneratorExit exception to allow clean-up
# these methods are not part of the fundamental Python object data model, and
# they are specific to generator objects created via the yield syntax
def latch(value):
while True:
v = (yield value)
if v is not None:
value = v
x = latch('first')
print '1', next(x)
print '2', next(x)
print '3', x.send('second')
print '4', next(x)
print '5', next(x)
print '6', next(x)
# SYNTAX // PUMPING A GENERATOR
# note that because the first pass through the generator executes
# instructions up to the yielding of first value
# there is no way to capture any information in this pass
# therefore, the first send() call must send a None value
# sending a non-None value will raise a TypeError
# calling next() immediately after creating a generator instance in order
# to set it up to receive values is often called 'pumping' or 'priming' it
try:
x = latch(10)
x.send(20)
except TypeError as e:
print e
else:
assert False, 'should have raised the TypeError'
# of course, we can get around this wart with a nice decorator:
def pump(gen):
def pumped(*args, **kwargs):
g = gen(*args, **kwargs)
next(g)
return g
return pumped
@pump
def latch(value):
while True:
value = (yield value) or value
x = latch(10)
print next(x)
print next(x)
print next(x)
print x.send(20)
print next(x)
print next(x)
print next(x)
# SYNTAX // GENERATOR EXPRESSIONS
# generator expression is just a simpler version of yield-syntax
# which lacks the .send()/.throw()/.close() methods
# and which is itself already a generator instance
gen = (y for y in [1,2,3])
from collections import Iterable
assert isinstance(gen,Iterable)
for x in gen:
print x
# note that a generator expression can be iterated over only once
# as it is an instance of a generator rather than a definition of
# a generator
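# a quick demonstration of the one-pass behaviour:
gen = (y for y in [1,2,3])
assert list(gen) == [1, 2, 3]
assert list(gen) == []  # already exhausted: a second pass yields nothing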
# in yield-syntax, code is executed only as we iterate over the generator
# however, note that in a generator expression, the iterated over expression
# is evaluated at definition time:
def foo():
print 'foo'
return [1,2,3]
gen = (y for y in foo())
# SYNTAX // GENERATORS ARE DELAYED/LAZY COMPUTATIONS
# so iterator objects like generators are able to model delayed computations
# in that we can structure the computation to run only as results are required:
from time import sleep
def slow_func():
x = 1
sleep(.05)
y = 2
sleep(.05)
z = 3
sleep(.05)
return (x,y,z)
def slow_gen():
yield 1
sleep(.05)
yield 2
sleep(.05)
yield 3
sleep(.05)
# in the former example, we do all processing up front and then return
# the complete value
# in the latter, we return values as desired ("lazily")
# note that in the case that we need to consume the entire
# computation before continuing, this doesn't gain us much:
assert list(slow_func()) == list(slow_gen())
# SYNTAX // EXAMPLES: Project Euler #1
# however, let's consider the following problem from Project Euler
# this is an extremely synthetic (contrived) example
# Problem 1
# 05 October 2001
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we
# get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
def euler1():
total = 0
for x in xrange(1,1001):
        if x%3 == 0 or x%5 == 0:
total += x
return total
# note that we have muddied our solution with the need to manually track some
# state in the form of the total variable
# also, consider what we could parameterise here: the upper bound, the
# multiples
def euler1a(upper_bound=1001, multiples=(3,5)):
total = 0
for x in xrange(1,upper_bound):
if any(not x%y for y in multiples):
total += x
return total
# note that one interesting thing I've done in the above is replace
# x%y == 0 with not x%y
# this may seem silly, and it is, indeed, incredibly silly
# but consider that `%` means modulo, and modulo
# has meaningful semantics for anything that is divisible: e.g.,
# polynomials can be divided, and the remainder of division of
# some polynomial objects might might be represented as a polynomial object
# where it's value is not comparable to the integer 0 but may represent
# the 0-value for that space. here, it might be easiest to just rely on
# considering this value in logical terms of being True- or False-equivalent
# (i.e., the remainder value is True-equivalent if it exists and is not the
# 0-value for this calculation structure)
# but what if we wanted to find the sum of all numbers in the fibonacci
# sequence that are multiples of 3 or 5? (an admittedly synthetic example)
def euler1b(seq, multiples=(3,5)):
total = 0
for x in seq:
if any(not x%y for y in multiples):
total += x
return total
# we're also losing some information by summing all of these values together
# if we wanted to find the max or the min, we would have to write new functions!
def euler1c(seq=xrange(1,1001), multiples=(3,5)):
total = []
for x in seq:
if any(not x%y for y in multiples):
total.append(x)
return total
assert sum(euler1c()) == 234168
# but do we necessarily need to wait until we've calculated all the values
# before actually doing something with them?
# what if, for example, we just want the FIRST value that is a multiple
# of 3 or 5 in our sequence
# need to wrap with iter(), because euler1c() returns a list
# which is Iterable but not an Iterator (see above for distinction!)
assert next(iter(euler1c())) == 3
# here's where we can use a generator
def euler1d(seq=xrange(1,1001), multiples=(3,5)):
for x in seq: # for each value in seq
if any(not x%y for y in multiples): # yield it if it's a multiple of
yield x # any of the multiples
# note how our function has suddenly slimmed down! we no longer
# explicitly track any state (this state is implicitly tracked for us
# via the stack frame)
# notice how each line of our function now relates to the problem we're
# trying to solve, and note how it should be much more readable now
# also, notice how much more flexible our solution has become
# we don't care about what the sequence provided is
assert next(euler1d()) == 3
assert min(euler1d()) == 3
assert max(euler1d()) == 1000
assert sum(euler1d()) == 234168
# note that when we discussed the generator protocol, we did not
# state that a generator is a Sized object: i.e., is an object
# with a __len__ method that can determine its own size
try:
len(euler1d())
except TypeError as e:
print e
else:
assert False, 'should have raised the TypeError'
# ASIDE // LENGTH OF AN ABSTRACT ITERATOR
# so, as an aside, how do we calculate the length of a generator?
xs, ys = list(euler1d()), euler1d()
assert len(xs) == sum(1 for _ in ys)
# unfortunately, while this technique works, it means we cannot
# determine the length of a generator without losing all of its elements
# or without calculating all of its elements!
# this tells us one important thing: generators are one way; generators are
# "exhaustible"
# ASIDE // AVERAGE AND VARIANCE OF AN ABSTRACT ITERATOR
# another aside, how do we calculate the average of a generator
# without iterating over it twice?
xs, ys = list(euler1d()), euler1d()
assert len(xs) == sum(1 for _ in ys)
xs, ys = list(euler1d()), euler1d()
assert sum(xs)/len(xs) == \
(lambda sum, len: sum/len)(
*reduce(
lambda *ts: tuple(sum(t) for t in zip(*ts)),
((y,1) for y in ys)))
# or, cuter:
xs, ys = list(euler1d()), euler1d()
assert sum(xs)/len(xs) == (lambda x: x.real/x.imag)(sum(y+1j for y in ys))
# this is gratuitous:
# what about the variance?
# variance = average of (x-u)^2 = average of x^2 - (average of x)^2
xs, ys = list(euler1d()), euler1d()
mean = lambda xs: sum(xs)/len(xs)
assert abs(sum((x-mean(xs))**2 for x in xs)/len(xs) -
(lambda a,b,c: a/c - (b/c)**2)(
*reduce(
lambda *ts: tuple(sum(t) for t in zip(*ts)),
((y**2,y,1) for y in ys)))) < 0.1 # need to check with tolerance
# SYNTAX // EXAMPLE: Fibonnaci sequence
# let's look at another simple example, the aforementioned fibonacci series
# a formulation using a function might look like this:
def fib(upper_bound):
rv = [0,1]
while rv[-1] + rv[-2] < upper_bound:
rv.append( rv[-1] + rv[-2] )
return rv
assert fib(10) == [0, 1, 1, 2, 3, 5, 8]
# notice that this formulation is actually quite nice to read
# the rv.append( ... ) line encodes our recurrence relationship very clearly
# note that there are some limitations to the above:
# first, we cannot easily rewrite the above to take either an upper bound
# or take a number of elements; in fact, restricting the returned values
# from fib is a detail we have no control over and also a detail
# unrelated to the problem we're trying to solve
# additionally, we have to compute every value of fib() before we can use
# any of the values and our solution will require memory on the order
# of the number of values returned, even if we don't need that much memory
# e.g., if we do sum(fib(10)), we don't need to know about more
# than a single value at a time!
def fib(a=0, b=1):
while True:
yield a
a, b = b, a+b
# get the first ten elements by slicing
from itertools import islice
assert list(islice(fib(),10)) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
# get the elements under 10
from itertools import takewhile
assert list(takewhile(lambda x: x < 10, fib())) == [0, 1, 1, 2, 3, 5, 8]
# note that fib() is also an infinite generator
# it will continue to yield values forever (and instances will never get
# garbage collected until they are totally dereferenced or
# they are explicitly told to .close())
# we can think of some other problems that naturally lend themselves
# to this kind of structure
# note that these generators also remove a constraint:
# by merely yielding values back to the caller, they no longer
# have to specify the returned structure: values can be yielded
# back and the caller can decide to put them in a list, a set, a
# ring buffer, &c.
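# for instance, the same fib() stream can be poured into whatever
# container the caller prefers:
from itertools import islice
assert list(islice(fib(), 5)) == [0, 1, 1, 2, 3]
assert tuple(islice(fib(), 5)) == (0, 1, 1, 2, 3)
assert set(islice(fib(), 5)) == set([0, 1, 2, 3])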
# SYNTAX // EXAMPLE: prime fountain
# for example, a simple prime fountain (using a very inefficient algorithm)
# (note that this is not a sieve algorithm)
from itertools import count
def primes():
primes = set()
for x in count(2):
if all(x%p for p in primes):
yield x
primes.add(x)
from itertools import islice
print list(islice(primes(),None,10))
# C EXAMPLE:
# nwise.c
# the C example shows us an interesting detail
# say we have some instrumentation with a nice C-API that throws samples
# at us one at a time
# we can easily wrap this API in less than 200 lines of code
# and expose them to Python
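# since nwise.c isn't reproduced here, a rough sketch of the same idea in
# pure Python using ctypes; the library name 'libsampler.so' and the
# function next_sample() (returning a double) are hypothetical stand-ins
# for whatever the instrumentation API actually provides
import ctypes
def samples(libpath='libsampler.so'):
    lib = ctypes.CDLL(libpath)                 # load the (hypothetical) C library
    lib.next_sample.restype = ctypes.c_double  # declare the return type
    while True:
        yield lib.next_sample()                # one C call per value, lazily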
# INTRO TO ITERTOOLS:
# itertools_intro.py (missing: itertools.chain)
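# since the handout is noted as missing itertools.chain, the one-liner:
# chain lazily concatenates any number of iterables end to end
from itertools import chain
assert list(chain([1, 2], (3,), 'ab')) == [1, 2, 3, 'a', 'b']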
# FURTHER EXAMPLES:
# {buffered,unbuffered}.py
# greedy.py
# {treeify,find,to_ranges}.py
# STREAM PROCESSING
# the exhaustibility of a generator is part of their fundamental denotational
# semantics
# that is, a callable that takes a generator as an argument must specify
# whether it exhausts (partially or fully) by observing any calculations
# if that function touches the generator (by asking for any value) a stateful
# change has been made, the repercussions of which may be unobservable
# in other areas where the generator is referenced
# as a consequence, generators are generally best used as either infinite
# sequences with an explicit statefulness (e.g., as a coroutine)
# or as units of calculation in a pipeline where the foremost
# input is wholly consumed by the calculation
# so we should think of this as a tool that can be useful for structuring
# series of computations in series
# since we have easy interoperability with C, we can, for example,
# take a process that spits out data via a C API
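# a small sketch of that pipeline shape: each stage consumes the stage
# before it, one value at a time, and nothing is buffered along the way
def parse(lines):
    for line in lines:
        yield float(line)
def clamp(xs, lo=0.0, hi=1.0):
    for x in xs:
        yield min(max(x, lo), hi)
raw = iter(['0.3', '1.7', '-0.2'])
assert list(clamp(parse(raw))) == [0.3, 1.0, 0.0]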
# OPTIMISATIONS & PERFORMANCE
# there's a consideration of optimisation and performance when
# we talk about these structures
# there is an indisputable performance benefit to the use of generators
# in the asymptotic sense: generators can encode a problem with
# the same time-complexity but better space-complexity (often O(n) to O(1))
# one might also see microbenchmarks talking about how simple operations
# are faster since the code is executed from "within C"
# these may seem dubious, and I personally believe that they tend to be
# fairly dubious
# however, structuring a computation as a sequence of generators
# opens us up to a few interesting optimisation possibilities
# generally, generators are opaque units, like functions
func1 = func2 = func3 = lambda x: list(x)
gen1 = gen2 = gen3 = lambda xs: (x for x in xs)
input_data = (x for x in xrange(10))
output_data = func1(func2(func3(input_data)))
input_data = (x for x in xrange(10))
output_data = gen1(gen2(gen3(input_data)))
from itertools import tee
input_data = tee((x for x in xrange(10)),2)
assert func1(func2(func3(input_data[0]))) == \
list( gen1( gen2( gen3(input_data[1]))) )
# in the first example, each function has no ability to look into
# any of the other functions or perform and specialisations as a result
# each unit is totally opaque
# in the latter example, this is true, except because the computations are
# delayed, each subsequent unit has slightly more information:
# it can operate on partial results
# therefore, we could create an optimisation as follows:
from itertools import islice
class partialdict(dict):
def __missing__(self, key):
return self[frozenset(islice(iter(key),1,None))]
class gentype(object):
dispatch = partialdict({frozenset(): lambda gen,arg: (gen(arg),'')})
def __init__(self, gentype):
self.gentype = gentype
def __call__(self, gen):
return typed(gen, gentype=self.gentype)
class typed(object):
def __init__(self, gen, gentype):
self.gen, self.gentype = gen, gentype
def __call__(self, arg):
arg_gentype = arg.gentype if isinstance(arg, wrapped) else ''
dispatch = gentype.dispatch[frozenset([self.gentype, arg_gentype])]
rv, rv_type = dispatch(self.gen, arg)
if not isinstance(rv, wrapped) or rv.gentype != rv_type:
rv = wrapped(rv, rv_type)
return rv
class wrapped(object):
def __init__(self, gen, gentype):
self.gen, self.gentype = gen, gentype
def __iter__(self):
return self
def __next__(self):
return next(self.gen)
next = __next__
def send(self, *args, **kwargs):
return self.gen.send(*args, **kwargs)
def throw(self, *args, **kwargs):
return self.gen.throw(*args, **kwargs)
def close(self, *args, **kwargs):
return self.gen.close(*args, **kwargs)
passthrough = gentype('passthrough')
gentype.dispatch[frozenset([passthrough.gentype, passthrough.gentype])] = \
lambda gen,arg: (arg, passthrough.gentype)
gentype.dispatch[frozenset([passthrough.gentype])] = \
lambda gen,arg: (arg, passthrough.gentype)
@passthrough
def f(xs):
print 'fff'
for x in xs:
yield x
@passthrough
def g(xs):
print 'ggg'
for x in xs:
yield x
def h(xs):
print 'hhh'
for x in xs:
yield x
result = f(g(h(x for x in xrange(10))))
print result
print list(result)
# PERFORMANCE // C++ vs Python
# performance/bythrees.c
# performance/bythrees.py
# FUNDAMENTAL INTERFACE // PRIMACY OF __iter__ & __call__
# say we have a function of the form:
def append_one(xs):
xs.append(1)
# we have no ability to control the type of the input parameter, xs
try: append_one([])
except AttributeError as e: print e
else: assert True, 'should NOT have seen AttributeError'
try: append_one({})
except AttributeError as e: print e
else: assert False, 'should have seen AttributeError'
try: append_one(set())
except AttributeError as e: print e
else: assert False, 'should have seen AttributeError'
try: append_one(tuple())
except AttributeError as e: print e
else: assert False, 'should have seen AttributeError'
# we cannot control these types statically
# we can do it dynamically, but this is mostly meaningless
def append_one(xs):
assert hasattr(xs,'append')
xs.append(1)
# all we've done is trade an AttributeError for an AssertionError!
# because we cannot control the types statically, it seems to me
# that we should instead attempt to minimise our exposure to these kinds of
# errors
# notice that the only typing we have available is a form of structural
# typing, where we type on the presence of certain, specially named methods
# we have no insight into the methods themselves: they are totally opaque
# so we do not have and will never have the ability to type on or
# control the implementation of these parameters
# that is, .append() might exist, but it could do anything
# we can minimise these kinds of errors by limiting ourselves to certain,
# established interfaces, and it appears that the two prime interfaces
# in Python are __iter__ and __call__
def append_one(updater):
updater(1)
append_one([].append)
append_one(lambda x,y=(): y + (x,) )
append_one(set().add)
append_one(lambda x,y={}: y.update({x:x}))
# __call__ is a fundamental interface that is available to:
# generators
# functions (and C-functions)
# type objects
# class objects
# instance objects (via tp_call/__call__/())
# another fundamental interface is __iter__/next
# this is the interface available to:
# generators
# all collection types in Python (set(), {}, [], (), '')
# it is the interface used by the `for` construct
# of course, the __iter__ interface can be thought of as
# subordinate to the __call__ interface, as, for any iterable object,
# you can do `f(x.next)` in place of `f(x)`
# if we encode the complexity of our problems in such a way
# that the only required interfaces are __call__ and __iter__
# we open ourselves up to an enormously flexible model
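# a tiny illustration of that subordination: a consumer written purely
# against __call__ can be fed an iterator's next method directly
def take3(nxt):
    return [nxt(), nxt(), nxt()]
assert take3(iter(xrange(10)).next) == [0, 1, 2]
assert take3(fib().next) == [0, 1, 1]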
# FUNDAMENTAL INTERFACE // COMPLEXITY PATHWAYS
# ???
# FUNDAMENTAL INTERFACE // Example: greedy{_simple,}.py
# HARD PROBLEM
# we looked at an example of calculating the variance of a generator
# and noticed that, in order to stay within a constant memory usage
# constraint, we would need to collect all values within a single pass
# what if we had a basic function that we did NOT want to exhaust some
# provided Iterable
# this problem looks simple, but it is, in fact, extremely difficult
# to solve completely, as the deeper you try to solve it, the
# more you have to touch (or hack around)
# we just want to peek one value ahead without changing xs
def peek(xs):
return next(xs)
# however, each time we peek, xs is advanced
xs = (x for x in xrange(1,100) if not x%3)
assert peek(xs) == 3 and peek(xs) != 3
# we know that we can tee an Iterable to copy it if we want to be able to
# traverse it twice
xs = (x for x in xrange(1,100) if not x%3)
from itertools import tee
xs1, xs2 = tee(xs,2)
assert peek(xs1) == 3 and peek(xs2) == 3
# however, because a generator is an opaque structure, this means that
# we can never control the internal state of the generator
# once we try to "observe" a value (i.e., force a calculation to yield
# the next value)
# as a consequence, tee() acts merely as a buffer and the original
# iterable is no longer usable
# the original iterable will be mutated by iteration through the tee()
# proxies to it, and these proxies will handle buffering the values
# yielded
# as a result, we CANNOT rewrite peek as follows:
def peek(xs):
xs,_ = tee(xs,2)
return next(xs)
xs = (x for x in xrange(1,100) if not x%3)
assert peek(xs) == 3 and peek(xs) != 3  # still doesn't work!
# additionally, from within the scope of the function,
# there's no way we can "reach out" to tell the caller to make
# a buffering copy
# the only thing we can do is force the caller to explicitly
# handle the buffering copy via tee() before calling our function
# this means that exhaustion is part of the denotational semantics
# of Python functions that accept iterables
# the function must specify whether it iterates through or exhausts its
# parameters so the caller can know whether or not they need to
# make buffering copies
# the only trick around this is as follows:
def swaparg(pos, argspec):
def parse(args, kwargs):
if isinstance(pos,basestring) and pos in kwargs:
source, pos_ = kwargs, pos
elif isinstance(pos,basestring):
source, pos_ = args, argspec.args.index(pos)
elif argspec.args[pos] in kwargs:
source, pos_ = kwargs, argspec.args[pos]
else:
source, pos_ = args, pos
return source, pos_
def find(args, kwargs):
source, pos = parse(args, kwargs)
return source[pos]
def replace(args, kwargs, value):
source, pos = parse(args, kwargs)
source[pos] = value
return args, kwargs
return find, replace
from itertools import chain
from inspect import stack, getargspec
def copied_iterable(pos):
def decorator(func):
argspec = getargspec(func)
find, replace = swaparg(pos, argspec)
def wrapper(*args, **kwargs):
it = find(args, kwargs)
caller_copy, local_copy = tee(it,2)
# replace in callers
caller_frames = (frame[0] for frame in stack()[1:])
caller_locals = (frame.f_locals for frame in caller_frames)
replacements = (((ls,k) for k,v in ls.iteritems() if v is it)
for ls in caller_locals)
for ls,k in chain.from_iterable(replacements):
ls[k] = caller_copy
# replace locally
args, kwargs = replace(list(args), kwargs.copy(), local_copy)
return func(*args, **kwargs)
return wrapper
return decorator
@copied_iterable('xs')
def peek(xs):
return next(xs)
xs = (x for x in xrange(1,100) if not x%3)
assert peek(xs) == 3 and peek(xs) == 3 # works!
assert peek(xs=xs) == 3 and peek(xs=xs) == 3 # works!
# this illustrates an issue with Python decorators
# we lose (and have no way to reconstruct) the original
# argspec of the original function
# therefore, the following will fail!
# @copied_iterable('xs')
# @copied_iterable('ys')
# def peek(xs,ys):
# return next(xs), next(ys)
# xs = (x for x in xrange(1,100) if not x%3)
# ys = (y for y in xrange(1,100) if not y%5)
# assert peek(xs,ys) == (3,5) and peek(xs,ys) == (3,5) # works!
# functions are opaque, in that we aren't guaranteed to be able to
# inspect and control them as above, so the above technique might fail if
# some functions higher in the stack frame are not Python functions
# additionally, let's consider that somewhere in the caller of peek,
# we could have sent the value of the iterable we want to copy
# to some generator
# the generator's frames won't show up in the stack frames of
# peek(), so we also need to go in and search through every
# generator to make sure that it doesn't have an old reference
# to the old iterable
# generators are opaque in that we can't really inspect or control
# them from the outside except via the interface next/send/throw/close
# therefore, even if we can find all the generators that might have
# captured references to the iterable, we might be unable to alter them
|
UTF-8
|
Python
| false | false | 2,012 |
18,837,726,580,808 |
7ab09993fe9fc3867d9027f64f77eb85e742822b
|
097a8bf4879750f5de54d5a0f61c9b6a42c967f8
|
/handler/__init__.py
|
ecc2cf19aecf148e54dfdb906f5d3b98075f881d
|
[] |
no_license
|
zqqf16/znote
|
https://github.com/zqqf16/znote
|
19ef704f04e0a4d13ff54c2a13d719958e78f7d7
|
a1527237d5400cb7a090f6e67e0d3eb1b422d6a0
|
refs/heads/master
| 2020-05-29T17:27:23.289224 | 2013-04-19T07:46:28 | 2013-04-19T07:46:28 | 9,374,641 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import admin
import frontend
import article
import category
__all__ = ['admin', 'frontend', 'article', 'category']  # __all__ holds names, not module objects
|
UTF-8
|
Python
| false | false | 2,013 |
2,645,699,900,619 |
44736ea8bfa6563bfd49d46a6c259d60fbf7d4ed
|
231de155fa779528706cd25e59dd87a353f6296f
|
/rodunu/utils/regex.py
|
7ea9a3a4b49693e41126ecee2758ba27763d9c80
|
[] |
no_license
|
jsunstrom/rdev
|
https://github.com/jsunstrom/rdev
|
ab73db79442ef66a1391b04df83bb55777ecf377
|
92801b0c32ec6b1f69e014f38a4964aa07f963ab
|
refs/heads/master
| 2020-05-17T01:04:52.798703 | 2011-04-29T03:39:27 | 2011-04-29T03:39:27 | 1,668,353 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
def is_fraction(val):
    # anchored so that only a whole number, a fraction, or a mixed number
    # matches (e.g. "3", "1/2", "1 1/2"), not any string containing a digit
    regex = re.compile(r'^(\d+)(\s+\d+)?(/\d+)?$', re.IGNORECASE)
    return bool(regex.match(val))
def is_float(val):
    # anchored so that only a decimal number matches, not any substring of it
    regex = re.compile(r'^[+-]?\d*\.\d+$', re.IGNORECASE)
    return bool(regex.match(val))
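# a few sanity checks for the anchored patterns above; run this module
# directly to exercise them (the sample inputs are illustrative only)
if __name__ == '__main__':
    assert is_fraction('1 1/2') and is_fraction('1/2') and not is_fraction('abc')
    assert is_float('3.14') and is_float('-0.5') and not is_float('3')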
|
UTF-8
|
Python
| false | false | 2,011 |
730,144,463,204 |
58aa8e3a0f3a3692fee7e6e6d974232db86f3c1c
|
08604e449be9ebcf66f4b49d42103cb099576329
|
/test/core/comm.py
|
3fe8362991f61d2721ac42c8776c2859fde95b96
|
[
"GPL-2.0-only"
] |
non_permissive
|
steder/maroonmpi
|
https://github.com/steder/maroonmpi
|
9307a7cd4c2ea8498d71e4c1dc261a484049bbf9
|
063473abc05e2ee720bcc0564ebcae255fa013ab
|
refs/heads/master
| 2016-09-06T19:50:47.261539 | 2012-02-16T04:02:04 | 2012-02-16T04:02:04 | 3,456,636 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys,mpi
import Numeric
import unittest
class MPI_Comm_TestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testCommRank( self ):
rank = mpi.comm_rank( mpi.MPI_COMM_WORLD )
print rank
self.assert_( rank >= 0 )
def testCommSize( self ):
size = mpi.comm_size( mpi.MPI_COMM_WORLD )
print size
self.assert_( size >= 1 )
def testCommSplitSingletons( self ):
rank = mpi.comm_rank( mpi.MPI_COMM_WORLD )
#print "testCommSplitSingletons:",rank
#newcomm = mpi.comm_split( mpi.MPI_COMM_WORLD, rank, 0 )
#nrank = mpi.comm_rank( newcomm )
#nsize = mpi.comm_size( newcomm )
#self.assert_( nrank == 0 )
#self.assert_( nsize == 1 )
self.assert_( True )
# def testCommSplitEvenOdd( self ):
# """
# Create even and odd communicators and verify
# that all the processors in the new even/odd communicators
# belong there.
# """
# size = mpi.comm_size( mpi.MPI_COMM_WORLD )
# rank = mpi.comm_rank( mpi.MPI_COMM_WORLD )
# if ( (rank % 2) == 0 ):
# # Even case
# newcomm = mpi.comm_split( mpi.MPI_COMM_WORLD, 0, 0 )
# else:
# # Odd case
# newcomm = mpi.comm_split( mpi.MPI_COMM_WORLD, 1, 0 )
# # Verify that all the ranks in each communicator are even/odd
# nsize = mpi.comm_size( newcomm )
# ranks = mpi.gather( rank, 1, mpi.MPI_INT, nsize,mpi.MPI_INT, 0, newcomm )
# ranks = mpi.bcast( ranks, nsize, mpi.MPI_INT, 0, newcomm )
# for r in ranks:
# self.assert_( (r%2) == (rank%2) )
# def testDup( self ):
# rank = mpi.comm_rank( mpi.MPI_COMM_WORLD )
# size = mpi.comm_size( mpi.MPI_COMM_WORLD )
# newcomm = mpi.comm_dup( mpi.MPI_COMM_WORLD )
# nrank = mpi.comm_rank(newcomm)
# nsize = mpi.comm_size(newcomm)
# self.assert_( rank == nrank )
# self.assert_( size == nsize )
def suite():
suite = unittest.TestSuite()
# suite.addTest(MPI_SendRecv_TestCase("testMethodName"))
suite.addTest( MPI_Comm_TestCase("testCommRank") )
suite.addTest( MPI_Comm_TestCase("testCommSize") )
suite.addTest( MPI_Comm_TestCase("testCommSplitSingletons") )
# suite.addTest( MPI_Comm_TestCase("testCommSplitEvenOdd") )
# suite.addTest( MPI_Comm_TestCase("testDup") )
return suite
if __name__=="__main__":
rank, size = mpi.init(len(sys.argv), sys.argv)
mysuite = suite()
test_runner = unittest.TextTestRunner()
result = test_runner.run( mysuite )
print result
mpi.finalize()
|
UTF-8
|
Python
| false | false | 2,012 |
16,844,861,762,596 |
5df18a4a9f8995d0698d1f0472300843f1a089fa
|
7c78e2a9236e7486479773959011fd6026504569
|
/genPrimes.py
|
12be4ebbd7d0c25d4f286101c905a0c366eab30e
|
[] |
no_license
|
muthazhagu/wordpress
|
https://github.com/muthazhagu/wordpress
|
14b04f2f5e69b768b1f9c49cba287345b413c964
|
cf9a2d678b295c55183a06153e51a447b6026dc3
|
refs/heads/master
| 2016-03-24T06:46:18.689098 | 2013-09-28T15:36:38 | 2013-09-28T15:36:38 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import itertools as it
def genPrimes():
    yield 2
    primes = [2]
    # test each odd candidate against every prime found so far
    for num in it.count(3, 2):
        if all(num % p for p in primes):
            primes.append(num)
            yield num
def generateKprimes(k):
primenumbergenerator = genPrimes()
for i in range(k):
print primenumbergenerator.next()
generateKprimes(10)
|
UTF-8
|
Python
| false | false | 2,013 |
2,757,369,039,536 |
231e8bc20526d62ec8456c82db868748bec4e650
|
eeefcf8327689bb7ac63bd2c1eb83d8a89165a3d
|
/tests/test_tokenizer.py
|
227a57a75a65915865f3c6ffa11bed6e008e9829
|
[
"MIT"
] |
permissive
|
YuukanOO/beard
|
https://github.com/YuukanOO/beard
|
3602441f1f7e893e27f14edcaf590037e868f9f5
|
bedb541d313f1a093d08eb7598c1b23841e53541
|
refs/heads/master
| 2021-01-20T10:46:19.036865 | 2014-02-27T19:37:36 | 2014-02-27T19:37:36 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from beard import pos
import unittest
import os
class TestTokenizer(unittest.TestCase):
def setUp(self):
self.tokenizer = pos.Tokenizer('/')
def test_tokenize(self):
string = "Un/Det:Art chat/Nom mange/Ver:Pres une/Det:Art souris/Nom./Punc"
tokens = self.tokenizer.tokenize(string)
self.assertEqual(len(tokens), 6)
w, p = tokens[2]
self.assertEqual(w, 'mange')
self.assertEqual(p, 'Ver:Pres')
def test_tokenize_from_file(self):
tokens = self.tokenizer.tokenize_from_file(os.path.dirname(__file__) + '/sample_01.corpus')
self.assertTrue(tokens)
self.assertEqual(len(tokens), 25)
w, p = tokens[2]
self.assertEqual(w, 'est')
self.assertEqual(p, 'Ver:Pres')
|
UTF-8
|
Python
| false | false | 2,014 |
14,585,708,941,060 |
72e804b7bd6b9ba8b75f480d124400e54c8d242e
|
507812c8fe0d6a08074f9f99db981ef043d0cdfe
|
/smokesignal.py
|
c87157af32ab171e9385d89988308e4599b0770e
|
[
"MIT"
] |
permissive
|
JamesHyunKim/smokesignal
|
https://github.com/JamesHyunKim/smokesignal
|
6896dcb9bb7bd96984ff2f3857fb357b34e89b7b
|
9099d70a321e0752e806cb3d0c08cf84aeba4420
|
refs/heads/master
| 2021-01-22T14:02:51.144483 | 2014-08-12T23:29:02 | 2014-08-12T23:29:02 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
smokesignal.py - simple event signaling
"""
import sys
import types
from collections import defaultdict
from functools import partial
__all__ = ['emit', 'emitting', 'signals', 'responds_to', 'on', 'once',
'disconnect', 'disconnect_from', 'clear', 'clear_all']
# Collection of receivers/callbacks
_receivers = defaultdict(set)
_pyversion = sys.version_info[:2]
def emit(signal, *args, **kwargs):
"""
Emits a single signal to call callbacks registered to respond to that signal.
Optionally accepts args and kwargs that are passed directly to callbacks.
:param signal: Signal to send
"""
for callback in set(_receivers[signal]): # Make a copy in case of any ninja signals
_call(callback, args=args, kwargs=kwargs)
class emitting(object):
"""
Context manager for emitting signals either on enter or on exit of a context.
By default, if this context manager is created using a single arg-style argument,
it will emit a signal on exit. Otherwise, keyword arguments indicate signal points
"""
def __init__(self, exit, enter=None):
self.exit = exit
self.enter = enter
def __enter__(self):
if self.enter is not None:
emit(self.enter)
def __exit__(self, exc_type, exc_value, tb):
emit(self.exit)
def _call(callback, args=[], kwargs={}):
"""
Calls a callback with optional args and keyword args lists. This method exists so
we can inspect the `_max_calls` attribute that's set by `_on`. If this value is None,
the callback is considered to have no limit. Otherwise, an integer value is expected
and decremented until there are no remaining calls
"""
if not hasattr(callback, '_max_calls'):
callback._max_calls = None
if callback._max_calls is None or callback._max_calls > 0:
if callback._max_calls is not None:
callback._max_calls -= 1
return callback(*args, **kwargs)
def signals(callback):
"""
Returns a tuple of all signals for a particular callback
:param callback: A callable registered with smokesignal
:returns: Tuple of all signals callback responds to
"""
return tuple(s for s in _receivers if responds_to(callback, s))
def responds_to(callback, signal):
"""
    Returns whether callback will respond to a particular signal
:param callback: A callable registered with smokesignal
:param signal: A signal to check if callback responds
:returns: True if callback responds to signal, False otherwise
"""
return callback in _receivers[signal]
def on(signals, callback=None, max_calls=None):
"""
Registers a single callback for receiving an event (or event list). Optionally,
can specify a maximum number of times the callback should receive a signal. This
method works as both a function and a decorator::
smokesignal.on('foo', my_callback)
@smokesignal.on('foo')
def my_callback():
pass
:param signals: A single signal or list/tuple of signals that callback should respond to
    :param callback: A callable that should respond to supplied signal(s)
:param max_calls: Integer maximum calls for callback. None for no limit.
"""
if isinstance(callback, int) or callback is None:
# Decorated
if isinstance(callback, int):
# Here the args were passed arg-style, not kwarg-style
callback, max_calls = max_calls, callback
return partial(_on, signals, max_calls=max_calls)
elif isinstance(callback, types.MethodType):
# callback is a bound instance method, so we need to wrap it in a function
def _callback(*args, **kwargs):
return callback(*args, **kwargs)
return _on(signals, _callback, max_calls=max_calls)
else:
# Function call
return _on(signals, callback, max_calls=max_calls)
def _on(on_signals, callback, max_calls=None):
"""
Proxy for `smokesignal.on`, which is compatible as both a function call and
a decorator. This method cannot be used as a decorator
:param signals: A single signal or list/tuple of signals that callback should respond to
    :param callback: A callable that should respond to supplied signal(s)
:param max_calls: Integer maximum calls for callback. None for no limit.
"""
assert callable(callback), 'Signal callbacks must be callable'
# Support for lists of signals
if not isinstance(on_signals, (list, tuple)):
on_signals = [on_signals]
callback._max_calls = max_calls
# Register the callback
for signal in on_signals:
_receivers[signal].add(callback)
# Setup responds_to partial for use later
if not hasattr(callback, 'responds_to'):
callback.responds_to = partial(responds_to, callback)
# Setup signals partial for use later.
if not hasattr(callback, 'signals'):
callback.signals = partial(signals, callback)
return callback
def once(signals, callback=None):
"""
Registers a callback that will respond to an event at most one time
:param signals: A single signal or list/tuple of signals that callback should respond to
    :param callback: A callable that should respond to supplied signal(s)
"""
return on(signals, callback, max_calls=1)
def disconnect(callback):
"""
Removes a callback from all signal registries and prevents it from responding
to any emitted signal.
:param callback: A callable registered with smokesignal
"""
# This is basically what `disconnect_from` does, but that method guards against
# callbacks not responding to signal arguments. We don't need that because we're
# disconnecting all the valid ones here
for signal in signals(callback):
_receivers[signal].remove(callback)
def disconnect_from(callback, signals):
"""
Removes a callback from specified signal registries and prevents it from responding
to any emitted signal.
:param callback: A callable registered with smokesignal
:param signals: A single signal or list/tuple of signals
"""
# Support for lists of signals
if not isinstance(signals, (list, tuple)):
signals = [signals]
# Remove callback from receiver list if it responds to the signal
for signal in signals:
if responds_to(callback, signal):
_receivers[signal].remove(callback)
def clear(*signals):
"""
Clears all callbacks for a particular signal or signals
"""
signals = signals if signals else _receivers.keys()
for signal in signals:
_receivers[signal].clear()
def clear_all():
"""
Clears all callbacks for all signals
"""
for key in _receivers.keys():
_receivers[key].clear()
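if __name__ == '__main__':
    # a minimal smoke test sketching the intended use of the API above:
    # register a callback as a decorator, emit the signal, and introspect
    calls = []
    @on('ping')
    def handler():
        calls.append('ping')
    emit('ping')
    assert calls == ['ping']
    assert responds_to(handler, 'ping')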
|
UTF-8
|
Python
| false | false | 2,014 |
154,618,841,892 |
e1a4c948297b1ca11940973d251e69845947651c
|
e99dddf5f96eb4a00a296f3d471826a32e2103b4
|
/binding.gyp
|
b0aeb1e24f72c7a270e7aca307d65b0c1f9fd7ee
|
[
"GPL-2.0-only"
] |
non_permissive
|
abe545/strider-msbuild-logger
|
https://github.com/abe545/strider-msbuild-logger
|
dc811f6ad6f334df37a0f63a9898464d428c4bb6
|
5509506a717a8fa39e2a148e3fb13d1c1bd2d4bc
|
refs/heads/master
| 2020-05-15T09:23:13.029071 | 2014-06-28T21:03:23 | 2014-06-28T21:03:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
{
'targets': [{
'target_name': 'strider-msbuild-logger',
'type': 'none',
'actions': [{
'action_name': 'compile',
'inputs': [ ],
'outputs': [ 'strider.msbuild.logger.dll' ],
'message': 'msbuild Strider.MsBuild.Logger.csproj',
'action': ['msbuild', 'Strider.MsBuild.Logger.csproj', '/nologo', '/tv:2.0', '/p:Configuration=Release']
}]
}]
}
|
UTF-8
|
Python
| false | false | 2,014 |
11,776,800,352,617 |
b2fdc7d330100fc565c7f3d45bdc8ebfc86daafc
|
1fbcfc5a67cd5549f78b2cdc28a12a305de95d46
|
/tasks/settings.py
|
9989383515abb91ed466c5a317c401d16dc5148d
|
[] |
no_license
|
kristjanr/tasks
|
https://github.com/kristjanr/tasks
|
7eae7bc89336742b0dc0355f3de3e19c469f90c4
|
c4a1d9633d22b0fe6564fcd328987c3c4f982515
|
refs/heads/master
| 2020-07-08T02:54:52.360647 | 2014-09-15T03:22:54 | 2014-09-15T03:22:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
Django settings for tasks project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
MEDIA_ROOT = BASE_DIR + "/media/"
MEDIA_URL = '/media/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qk@lhmatzr$3fu=eua$urd73me&7@7louxx4v5=6p20)+_2f%m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sidrun',
'django_summernote',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tasks.urls'
WSGI_APPLICATION = 'tasks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tasks',
'USER': os.getlogin(),
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432'
}
}
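# Note: os.getlogin() above requires a controlling terminal and raises OSError
# under cron or most process managers; getpass.getuser() is a more portable
# way to pick up the current user if that becomes a problem:
#
#     import getpass
#     # 'USER': getpass.getuser(),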
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Tallinn'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = 'sidrun/static/'
STATIC_URL = '/static/'
# more config options: https://github.com/lqez/django-summernote#options
SUMMERNOTE_CONFIG = {
# Change editor size
'width': '90%',
'height': '300',
'toolbar': [["style", ["style"]],
["font", ["bold", "italic", "underline", "superscript", "subscript", "strikethrough", "clear"]],
["fontname", ["fontname"]], ["color", ["color"]], ["para", ["ul", "ol", "paragraph"]],
["height", ["height"]], ["table", ["table"]], ["insert", ["link", "video", "hr"]],
["view", ["fullscreen", "codeview"]], ["help", ["help"]]]
}
|
UTF-8
|
Python
| false | false | 2,014 |
17,179,894,197 |
92be473cfd11adfd256d8df1c2b048d61776d91e
|
f45b557bb09af1a4cd126e97c8011cc33dd0931e
|
/yy.py
|
ce0a32fe2b3c16572560519048a6f78bfef9d279
|
[] |
no_license
|
hashem13-meet/MEET-YL1
|
https://github.com/hashem13-meet/MEET-YL1
|
cbebe0b2c0619cba88c6ca000f4edf36b58f6449
|
29c9b3327f51d738680be8f4c2bc02c4d510bef1
|
refs/heads/master
| 2020-06-08T16:29:45.432753 | 2014-02-20T18:02:39 | 2014-02-20T18:02:39 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n = int(raw_input('enter a number'))
numbers = range(1, n)
print numbers
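# Python 2 note: range(1, n) builds the list [1, 2, ..., n - 1]; use
# range(1, n + 1) if n itself should be included.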
|
UTF-8
|
Python
| false | false | 2,014 |
15,522,011,821,568 |
b48dc073c26527d7ed65f8942a62e05f26bdcf22
|
bfe589b18ed908391a86b9bea727c6ce658e6bcd
|
/pictureCalibration.py
|
38e2c8797d9a457a036ebe39ba3abf3a8713ca12
|
[] |
no_license
|
Ga-ryo/source
|
https://github.com/Ga-ryo/source
|
e9d72f585e81d2241b5f2ce929c7fb618d6d96e5
|
229d9cea980a399bb79245a3678a9c31c0215ef7
|
refs/heads/master
| 2017-04-26T15:29:32.692903 | 2014-09-14T05:58:05 | 2014-09-14T05:58:05 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from __future__ import division
import numpy as np
import facetracker
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import Image
import cv2
import argparse
import os.path
import dataHandle
def main(img_path):
#
# Load image
#
    img = cv2.imread(img_path, 0)
    gray = cv2.imread(img_path, 0)
img = np.asanyarray(img)
gray = np.asarray(gray)
#
# Load face model
#
conns = facetracker.LoadCon('../pyFaceTracker-0.1.1/external/FaceTracker/model/face.con')
trigs = facetracker.LoadTri('../pyFaceTracker-0.1.1/external/FaceTracker/model/face.tri')
tracker = facetracker.FaceTracker('../pyFaceTracker-0.1.1/external/FaceTracker/model/face.tracker')
#
# Search for faces in the image
#
tracker.setWindowSizes((11, 9, 7))
print type(gray)
print type(tracker)
if tracker.update(gray):
#img = tracker.draw(img, conns, trigs)
obj3D = tracker.get3DShape()
print obj3D,len(obj3D),type(obj3D),obj3D.dtype
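        # get3DShape() returns the 66 tracked landmarks flattened as
        # [x0..x65, y0..y65, z0..z65], hence the one-third slices below.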
#fig3d = plt.figure()
#ax = fig3d.add_subplot(111, projection='3d')
print len(obj3D[:66])
#ax.scatter(obj3D[:66], obj3D[66:132], obj3D[132:])
filename ,extension = os.path.splitext(os.path.basename(img_path))
dataHandle.insert(filename,obj3D[:66,0],obj3D[66:132,0],obj3D[132:,0])
#for i in range(66):
# ax.text(obj3D[i], obj3D[i+66], obj3D[i+132], str(i))
#ax.view_init(-90, -90)
else:
print 'Failed tracking face in image:' + os.path.basename(img_path)
#plt.figure()
#plt.imshow(img)
#plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run pyfacetrack on an image')
parser.add_argument('path', help='Path to image', default=None)
args = parser.parse_args()
main(args.path)
|
UTF-8
|
Python
| false | false | 2,014 |
19,207,093,759,707 |
6d48db0612f66d56e276c2ef8c1cf5fc5a70ea5d
|
407cc2ffbc207e7c286e414191a4261db4ba0763
|
/serenecl.py
|
d62e1e999a10f7673caf1fdf84905637c18b10b8
|
[] |
no_license
|
cjh1/serene
|
https://github.com/cjh1/serene
|
d22a6c8684777b26dcb598d58ac30c04672d2ef9
|
03da1b74e04be09d57ed87528a6abb8e055bfb79
|
refs/heads/master
| 2021-01-15T18:08:53.016927 | 2014-03-04T16:50:20 | 2014-03-04T16:50:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import serene
import argparse
import importlib
import sys
def main():
parser = argparse.ArgumentParser(description='Serene command line.')
parser.add_argument('module', metavar='module', type=str,
help='the module to process')
parser.add_argument('--host', dest='host', default='localhost',
help='host name')
    parser.add_argument('--port', dest='port', default=8082, type=int, help='port')
parser.add_argument('--server', action='store_const', const='server', dest='action')
parser.add_argument('--doc', action='store_const', const='doc', dest='action')
args = parser.parse_args()
if not args.action:
print >> sys.stderr, "One of --server or --doc must be given"
return
importlib.import_module(args.module)
if args.action == 'server':
from bottle import run
        run(reloader=True, host=args.host, port=args.port)
elif args.action == 'doc':
print serene.generate_doc()
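# Example invocations (the module name `myapi` is hypothetical):
#
#     python serenecl.py myapi --server --host 0.0.0.0 --port 8082
#     python serenecl.py myapi --doc > API.txt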
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 2,014 |
11,811,160,075,036 |
82d960dab8e02cb42bd1285f38e8f77baa8438f0
|
778519ed510c8089d669b834e29e485c43ae10e9
|
/spook/__init__.py
|
f7957e3eb19e163f07ec844c9755e0a576eeea07
|
[] |
no_license
|
kelleyk/spook
|
https://github.com/kelleyk/spook
|
d3bc8d5a10dc9ce4fd8abaca81371b4acbfce0d5
|
6789bf855fbdf28bf5e6cb61e3e231e4b991c81a
|
refs/heads/master
| 2015-07-30T22:25:22 | 2012-06-11T21:50:41 | 2012-06-11T21:50:41 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from .main import main
from .spook import Spook
|
UTF-8
|
Python
| false | false | 2,012 |
19,146,964,248,522 |
10fb23bc83294a8a4be5fcb83cbd13e45138fc58
|
10389b92fc3fd4fd724cdc437595b25997d779e9
|
/oioioi/participants/models.py
|
de2372e3b93f7e10d09d0ffa8550338334e633e3
|
[
"GPL-3.0-only"
] |
non_permissive
|
CesarChodun/OIOIOI_site_developement_clone
|
https://github.com/CesarChodun/OIOIOI_site_developement_clone
|
661993676ce2f8c82f46a3be5ff2e5edfed67895
|
7f82fb16c9df658420375b7d2cfbcf56a78ceeaf
|
refs/heads/master
| 2020-08-27T07:13:03.177901 | 2014-03-17T12:31:59 | 2014-03-17T17:44:22 | 217,280,814 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from oioioi.base.fields import EnumRegistry, EnumField
from oioioi.base.utils.deps import check_django_app_dependencies
from oioioi.contests.models import Contest
from oioioi.participants.fields import \
OneToOneBothHandsCascadingParticipantField
check_django_app_dependencies(__name__, ['oioioi.contestexcl'])
participant_statuses = EnumRegistry()
participant_statuses.register('ACTIVE', _("Active"))
participant_statuses.register('BANNED', _("Banned"))
class Participant(models.Model):
contest = models.ForeignKey(Contest)
user = models.ForeignKey(User)
status = EnumField(participant_statuses, default='ACTIVE')
@property
def registration_model(self):
rcontroller = self.contest.controller.registration_controller()
model_class = rcontroller.get_model_class()
if model_class is None:
raise ObjectDoesNotExist
try:
return model_class.objects.get(participant=self)
except model_class.DoesNotExist:
raise ObjectDoesNotExist
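    # Usage sketch: `participant.registration_model` returns the
    # contest-specific registration record; callers should be ready to catch
    # ObjectDoesNotExist when the controller defines no model or no record
    # exists yet.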
class Meta(object):
unique_together = ('contest', 'user')
def __unicode__(self):
return unicode(self.user)
class RegistrationModel(models.Model):
participant = OneToOneBothHandsCascadingParticipantField(Participant,
related_name='%(app_label)s_%(class)s')
class Meta(object):
abstract = True
class TestRegistration(RegistrationModel):
"""Used only for testing"""
name = models.CharField(max_length=255)
|
UTF-8
|
Python
| false | false | 2,014 |
7,335,804,171,985 |
0686dcdcd4cbbaa7bb177c0cf7f649c85137c885
|
8104816dde92b5d11e83678595046ce5810f5a85
|
/org/i-david/wxpython/SimpleDemo.py
|
bfe045b66dc420e327f64f494794b36583034865
|
[] |
no_license
|
xtso520ok/wxpython-demo
|
https://github.com/xtso520ok/wxpython-demo
|
54689153521fd15d7f2b63c9c9426d38baa335a3
|
c10bdeefbcc0fda0dbd9f23e4a1439c43bf6fd9f
|
refs/heads/master
| 2018-07-22T12:21:43.103147 | 2013-07-26T08:33:23 | 2013-07-26T08:33:23 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on Jul 26, 2013
@author: David
'''
import wx
class App(wx.App):
def OnInit(self):
frame = wx.Frame(parent=None, title='David for frame')
frame.Show()
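        # wx.App requires OnInit to return True; returning False aborts
        # startup before MainLoop() ever runs.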
return True
if __name__ == '__main__':
app = App()
app.MainLoop()
|
UTF-8
|
Python
| false | false | 2,013 |