| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from CIM15.IEC61968.Metering.EndDeviceFunction import EndDeviceFunction
class GasMeteringFunction(EndDeviceFunction):
"""Functionality performed by a gas meter. It's entirely possible that the metering system would carry information to/from gas meters even though it was built primarily to carry the higher-value electric meter data.Functionality performed by a gas meter. It's entirely possible that the metering system would carry information to/from gas meters even though it was built primarily to carry the higher-value electric meter data.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'GasMeteringFunction' instance.
"""
super(GasMeteringFunction, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
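# Hedged usage sketch (assumes the CIM15 package is importable). The class adds
# no attributes or references beyond EndDeviceFunction, so it is instantiated
# exactly like its parent:
#
#   fn = GasMeteringFunction()
#   print(fn._attrs)   # -> []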
| {
"content_hash": "f143bc361ddb83f7f81c982ebc9e0157",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 431,
"avg_line_length": 44.89473684210526,
"alnum_prop": 0.7057444314185228,
"repo_name": "rwl/PyCIM",
"id": "bd596cef9ba149198baf803373195292b5b9ede9",
"size": "1953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Informative/InfMetering/GasMeteringFunction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
from flask import Blueprint
from flask import abort, current_app, g, make_response, jsonify
from todo.models import Task
todo = Blueprint('todo', __name__)
@todo.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@todo.route('/', methods=['GET'])
def check_health():
return jsonify({'health': 'okay'})
@todo.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
"""Retrieve all tasks."""
return jsonify({'task': 1})
@todo.route('/todo/api/v1.0/tasks/<task_id>', methods=['GET'])
def get_task(task_id):
    """Retrieve a single task by its id."""
    task = Task.query.get(task_id)
    if task is None:
        abort(404)
    return jsonify(task=task.as_dict())
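# Hedged usage sketch (not part of this module): the blueprint above would be
# registered on a Flask application roughly like this; the import path and the
# missing url_prefix are assumptions for illustration only.
#
#   from flask import Flask
#   from todo.views import todo
#
#   app = Flask(__name__)
#   app.register_blueprint(todo)
#   app.test_client().get('/todo/api/v1.0/tasks')   # 200 OK with a JSON body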
| {
"content_hash": "5073ec841963aa261ca5f8ca08b93b0a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 64,
"avg_line_length": 23.482758620689655,
"alnum_prop": 0.657856093979442,
"repo_name": "IamGianluca/todo",
"id": "540233156c7af32e6f20f79043da887db3d5bfe4",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "439"
},
{
"name": "Python",
"bytes": "15308"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
'''
PartsGenie (c) University of Manchester 2017
PartsGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import json
import sys
import time
from parts_genie.parts import PartsThread
from pathway_genie.pathway import PathwayGenie
class PartsGenieClient(object):
'''Simple client class for running PartsGenie jobs from JSON file.'''
def __init__(self):
self.__event = None
self.__pathway_genie = PathwayGenie()
def submit(self, filename, ice_params=None):
'''Submits PartsGenie job.'''
self.__event = None
results = []
with open(filename) as fle:
query = json.load(fle)
# Do job in new thread, return result when completed:
thread = PartsThread(query, idx=0, verbose=True)
thread.add_listener(self)
thread.start()
        while len(results) < len(query['designs']):
            if self.__event:
                status = self.__event['update']['status']
                if status == 'finished':
                    results.append(self.__event['result'])
                elif status in ('cancelled', 'error'):
                    results.append(None)
                    raise ValueError('PartsGenie job ' + status)
                # Clear the handled event so the same result is not re-appended:
                self.__event = None
            time.sleep(1)
if ice_params is not None:
# Saves results to ICE:
data = {'ice': {}}
data['ice']['url'] = ice_params[0]
data['ice']['username'] = ice_params[1]
data['ice']['password'] = ice_params[2]
data['ice']['groups'] = ice_params[3]
data['result'] = [val for result in results for val in result]
return self.__pathway_genie.save(data)
return results
def event_fired(self, event):
'''Responds to event being fired.'''
self.__event = event
def main(args):
'''main method.'''
client = PartsGenieClient()
ice_params = None if len(args) == 1 else args[1:]
client.submit(args[0], ice_params)
if __name__ == "__main__":
main(sys.argv[1:])
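# Hedged usage sketch: run the client with a JSON query file and, optionally,
# four ICE parameters (url, username, password, groups). The file name and ICE
# values below are placeholders, not real endpoints:
#
#   python client.py my_query.json
#   python client.py my_query.json https://ice.example.org user secret group1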
| {
"content_hash": "ae8ef70c52b33ac72ed7d2d839e6b21f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 28.813333333333333,
"alnum_prop": 0.5659416936603424,
"repo_name": "synbiochem/PathwayGenie",
"id": "7ac66e624cb52bc0c37383cc484917fe64de3b75",
"size": "2161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parts_genie/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1107428"
},
{
"name": "C++",
"bytes": "1037"
},
{
"name": "CSS",
"bytes": "3631"
},
{
"name": "EQ",
"bytes": "9986"
},
{
"name": "HTML",
"bytes": "36798"
},
{
"name": "JavaScript",
"bytes": "56150"
},
{
"name": "Makefile",
"bytes": "20423"
},
{
"name": "Python",
"bytes": "89576"
},
{
"name": "Shell",
"bytes": "1591"
}
],
"symlink_target": ""
} |
import sys
import time
import subprocess
from subprocess import Popen
from subprocess import PIPE
import json
import os
import re
import base64, uuid, io, codecs, mimetypes
import shutil
import shlex
import xml.dom.minidom
try:
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from httplib import HTTPSConnection
except ImportError:
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
from http.client import HTTPSConnection
from xml.dom.minidom import parse
def execCommand(command):
print("Executing " + command)
statementStatus = subprocess.call(command, shell=True)
if statementStatus == 1 :
print("Error executing " + command)
sys.exit("Error executing " + command)
return statementStatus
def deleteExistingApplication(applicationName):
if doesItExist("cf a ", applicationName, 0) :
deleteRequest = "cf delete -f -r " + applicationName
statementStatus = execCommand(deleteRequest)
if statementStatus == 1 :
time.sleep(5) # Delay for 5 seconds
execCommand(deleteRequest)
#check if really gone - POTENTIAL FOR INFINITE LOOP - Let the delete fail
#if doesItExist("cf a ", applicationName, 0) :
# print("Unable to delete an application, trying again : " +deleteRequest)
# time.sleep(5) # Delay for 5 seconds
# deleteExistingApplication(applicationName)
def deleteExistingService(serviceName):
if doesItExist("cf s ", serviceName, 0) :
deleteRequest = "cf delete-service -f " + serviceName
statementStatus = execCommand(deleteRequest)
if statementStatus == 1 :
time.sleep(5) # Delay for 5 seconds
execCommand(deleteRequest)
#check if really gone - POTENTIAL FOR INFINITE LOOP - Let the delete fail
#if doesItExist("cf s ", serviceName, 0) :
# print("Unable to delete an service, trying again: " +deleteRequest)
# deleteExistingService(serviceName)
def doesItExist(command, name, sectionNumber ) :
'''handle duplicates due to similar spellings, avoid using regular expressions'''
result, err, exitcode = call(command)
#print("Result = " + result)
#print("Err = " + err)
#print(exitcode)
rows = result.split('\n')
#print("Rows = ")
#print(rows)
#print(rows.encode('utf-8'))
if name in result:
print(name + " does EXIST")
return True
else:
print(name + " does not EXIST")
return False
#for row in rows:
# existingSection = row.split(" ")[sectionNumber]
# print(name)
# print("section= ")
# print(existingSection)
# if existingSection == name :
# return True
def createService(serviceName, serviceRequest):
print("Create service if it does not exist: " +serviceName)
print(serviceRequest)
if doesItExist("cf s ", serviceName, 0) :
print("Service Intance already exists:" + serviceName)
return None
else:
statementStatus = subprocess.call(serviceRequest, shell=True)
if statementStatus == 1 :
print("I am here 1")
print("Error creating a service: " +serviceName)
time.sleep(5) # Delay for 5 seconds
print("I am here after sleep 2")
statementStatus = subprocess.call(serviceRequest, shell=True)
if statementStatus == 1 :
print("Error creating a service: " +serviceName)
sys.exit("Error creating a service instance: " +serviceName)
else:
            # verify the service instance is actually visible yet
            if not doesItExist("cf s ", serviceName, 0) :
                time.sleep(5)
                print("Service not visible yet, re-checking: " + serviceName)
createService(serviceName, serviceRequest)
def unbind(applicationName,serviceName):
if doesItExist("cf a ", applicationName, 0) and doesItExist("cf a ", serviceName, 0):
unbindRequest = "cf us " + applicationName + " " + serviceName
print(unbindRequest)
statementStatus = subprocess.call(unbindRequest, shell=True)
if statementStatus == 1 :
print("Error unbinding an application: " + unbindRequest)
time.sleep(5) # Delay for 5 seconds
statementStatus = subprocess.call(unbindRequest, shell=True)
if statementStatus == 1 :
print("Error unbinding an application: " + unbindRequest)
sys.exit("Error unbinding an application instance: " +applicationName + " from " + serviceName)
def call(cmd):
"""Runs the given command locally and returns the output, err and exit_code, handles Pipes."""
if "|" in cmd:
cmd_parts = cmd.split('|')
else:
cmd_parts = []
cmd_parts.append(cmd)
i = 0
p = {}
for cmd_part in cmd_parts:
cmd_part = cmd_part.strip()
if i == 0:
p[i]=Popen(shlex.split(cmd_part),stdin=None, stdout=PIPE, stderr=PIPE)
else:
p[i]=Popen(shlex.split(cmd_part),stdin=p[i-1].stdout, stdout=PIPE, stderr=PIPE)
i = i +1
(output, err) = p[i-1].communicate()
exit_code = p[0].wait()
return str(output).strip(), str(err), exit_code
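# Hedged example of call(): pipes are split on '|' and chained through Popen,
# so the line below runs "cf apps" and feeds its stdout into grep. "my-app" is
# a placeholder application name.
#
#   output, err, exit_code = call("cf apps | grep my-app")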
# checkout submodules
def checkoutSubmodules():
print("Pulling Submodules for " + os.getcwd())
statementStatus = subprocess.call('git submodule init', shell=True)
if statementStatus == 1 :
sys.exit("Error when init submodule ")
statementStatus = subprocess.call('git submodule update --init --remote', shell=True)
if statementStatus == 1 :
sys.exit("Error when updating submodules")
return statementStatus
def updateGitModules(config):
print("CurrentDir " + os.getcwd())
if 'git@' in open('.gitmodules').read():
config.updateGitModules='true'
f1 = open('.gitmodules', 'r')
f2 = open('.gitmodules.script', 'w')
for line in f1:
line = line.replace(':', '/')
line = line.replace('git@', "https://")
f2.write(line)
f1.close()
f2.close()
shutil.copy(".gitmodules", ".gitmodules.bak")
shutil.copy(".gitmodules.script", ".gitmodules")
def restoreGitModules(config):
if ( os.path.isfile(".gitmodules.bak") ):
print("restoring .gitmodules")
shutil.copy(".gitmodules.bak", ".gitmodules")
def buildProject(mavenCommand,projectDir):
statementStatus = subprocess.call(mavenCommand, shell=True)
if statementStatus == 1 :
sys.exit("Error building the project "+projectDir)
return statementStatus
class MultipartFormdataEncoder(object):
def __init__(self):
self.boundary = "FILEBOUNDARY"
self.content_type = 'multipart/form-data; boundary={}'.format(self.boundary)
@classmethod
def u(cls, s):
if sys.hexversion < 0x03000000 and isinstance(s, str):
s = s.decode('utf-8')
if sys.hexversion >= 0x03000000 and isinstance(s, bytes):
s = s.decode('utf-8')
return s
def iter(self, fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
        files is a sequence of (name, filename, file-path) elements for data to be uploaded as files
Yield body's chunk as bytes
"""
encoder = codecs.getencoder('utf-8')
print(fields)
for (key, value) in fields:
key = self.u(key)
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(self.u('Content-Disposition: form-data; name="{}"\r\n').format(key))
yield encoder('\r\n')
if isinstance(value, int) or isinstance(value, float):
value = str(value)
yield encoder(self.u(value))
yield encoder('\r\n')
for (key, filename, fpath) in files:
key = self.u(key)
filename = self.u(filename)
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(self.u('Content-Disposition: form-data; name="{}"; filename="{}"\r\n').format(key, filename))
yield encoder('Content-Type: {}\r\n'.format(mimetypes.guess_type(filename)[0] or 'application/octet-stream'))
yield encoder('\r\n')
with open(fpath,'rb') as fd:
buff = fd.read()
yield (buff, len(buff))
yield encoder('\r\n')
yield encoder('--{}--\r\n'.format(self.boundary))
def encode(self, fields, files):
body = io.BytesIO()
for chunk, chunk_len in self.iter(fields, files):
body.write(chunk)
return self.content_type, body.getvalue()
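# Hedged example (defined here for illustration and never called): building a
# multipart/form-data request body with MultipartFormdataEncoder. The URL,
# field names and file path passed in are placeholders chosen by the caller.
def _exampleMultipartUpload(url, filePath, authHeader):
    encoder = MultipartFormdataEncoder()
    fields = [("description", "example upload")]
    files = [("file", os.path.basename(filePath), filePath)]
    contentType, body = encoder.encode(fields, files)
    request = Request(url, data=body)
    request.add_header("Content-Type", contentType)
    request.add_header("Authorization", authHeader)
    return urlopen(request)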
def evaluatePom(config, cfCommand, projectDir):
try :
print("\tevaluate Pom")
curDir=os.getcwd()
print ("\tCurrent Directory = " + os.getcwd())
print ("\tProject Directory = " + projectDir)
os.chdir(projectDir)
print ("\tCurrent Directory = " + os.getcwd())
f = open("pom.xml", 'r')
f1 = f.read()
f.close()
print("\t============================")
artifactIdTemp=re.search(r'<artifactId[^>]*>([^<]+)</artifactId>', f1)
if artifactIdTemp:
print("\t" + artifactIdTemp.group(1))
config.artifactId=artifactIdTemp.group(1)
else:
sys.exit("Error getting artifactId from " + projectDir + "/pom.xml")
versionTemp=re.search(r'<version[^>]*>([^<]+)</version>', f1)
if versionTemp:
print("\t" + versionTemp.group(1))
config.jarVersion=versionTemp.group(1)
else:
sys.exit("Error getting jarVersion from " + projectDir + "/pom.xml")
print("\tArtifactId derived from pom.xml = " + config.artifactId)
print("\tJar Version derived from pom.xml=" + config.jarVersion)
finally:
print ("\tCurrent Directory = " + os.getcwd())
os.chdir(curDir)
print ("\tCurrent Directory = " + os.getcwd())
print ("\txxx")
def getJarFromArtifactory(config, cfCommand, projectDir):
print("\tFast install =" + config.fastinstall)
if config.fastinstall == 'y' :
print("\tretrieve jar from Artifactory")
print("\tartifactory repo=" + config.artifactoryrepo)
print("\tartifactory user =" + config.artifactoryuser)
#print("\tartifactory pass =" + config.artifactorypass)
curDir=os.getcwd()
print ("\tCurrent Directory = " + os.getcwd())
print ("\tProject Directory = " + projectDir)
print('\tmvnsettings=' + config.mvnsettings)
print('\tmavenRepo=' + config.mavenRepo)
evaluatePom(config, cfCommand, projectDir)
print("\tCopying artifacts..")
f = open(config.mvnsettings, 'r')
f1 = f.read()
f.close()
#print(f1)
found = 0
dom = parse(config.mvnsettings)
serverlist = dom.getElementsByTagName("server")
try :
print("\tChdir to " + projectDir + " Current Directory = " + os.getcwd())
os.chdir(projectDir)
print("\tCurrent Directory = " + os.getcwd())
print("")
for aServer in serverlist:
artifactory1 = aServer.getElementsByTagName("id")[0].firstChild.data
artifactoryuser = aServer.getElementsByTagName("username")[0].firstChild.data
artifactorypass = aServer.getElementsByTagName("password")[0].firstChild.data
print( "\tserver id === " + artifactory1 )
repolist = dom.getElementsByTagName("repository")
for aRepo in repolist:
artifactory2 = aRepo.getElementsByTagName("id")[0].firstChild.data
artifactoryrepo = aRepo.getElementsByTagName("url")[0].firstChild.data
print("\tREPOSITORY INFO :looking for=" + artifactory1 + " found=" + artifactory2 + ":" + artifactoryrepo)
if artifactory1 == artifactory2 :
print("\tArtifactory derived from maven settings.xml ==== " + artifactory2)
print("\tArtifactory url from maven settings.xml ==== " + artifactoryrepo)
print("\tArtifactory user derived from maven settings.xml ==== " + artifactoryuser)
#print("Artifactory pass derived from maven settings.xml ==== " + artifactorypass)
if artifactorypass.find("${") == 0 :
print("\tpassword is set to an environment variable that was not found, moving on to next entry")
else:
try:
os.stat("target")
except:
os.mkdir("target")
urlOfJar=artifactoryrepo + "/com/ge/predix/solsvc/" + config.artifactId + "/" + config.jarVersion + "/" + config.artifactId + "-" + config.jarVersion + ".jar"
print("/turlOfJar=" + urlOfJar)
request = Request(urlOfJar)
authString = artifactoryuser + ":" + artifactorypass
base64string = base64.b64encode(bytearray(authString, 'UTF-8')).decode("ascii")
request.add_header("Authorization", "Basic %s" % base64string)
try:
downloadFile="target/" + config.artifactId + "-" + config.jarVersion + ".jar"
print("\tDownloading " + downloadFile)
result = urlopen(request)
with open(downloadFile, "wb") as local_file:
local_file.write(result.read())
print("\tFrom: url: " + artifactoryrepo)
print("\tDownloading DONE")
print("\t============================")
found = 1
break
except URLError as err:
e = sys.exc_info()[1]
print("\tNot found in that repo, let's try another." + urlOfJar + " Error: %s" % e)
found = 0
continue
except HTTPError as err:
e = sys.exc_info()[1]
print("\tNot found in that repo, let's try another." + urlOfJar + " Error: %s" % e)
found = 0
continue
if found == 1:
break
finally:
print("\tCurrent Directory = " + os.getcwd())
os.chdir(curDir)
print("\tCurrent Directory = " + os.getcwd())
if found == 0:
sys.exit("\tError copying artifact "+projectDir)
def pushProject(config, appName, cfCommand, projectDir, checkIfExists):
print("****************** Running pushProject for "+ appName + " ******************" )
if checkIfExists == "true" :
#check if really gone
if doesItExist("cf a ", applicationName, 0) :
print(appName + " already exists, skipping push")
return
if config.fastinstall == 'y' :
getJarFromArtifactory(config, cfCommand, projectDir)
statementStatus = cfPush(appName, cfCommand)
return statementStatus
def cfPush(appName, cfCommand):
print("Deploying to CF..., Current Directory = " + os.getcwd())
print(cfCommand)
statementStatus = subprocess.call(cfCommand, shell=True)
if statementStatus == 1 :
sys.exit("Error deploying the project " + appName)
print("Deployment to CF done.")
return statementStatus
def createPredixUAASecurityService(config):
#create UAA instance
uaa_payload_filename = 'uaa_payload.json'
data = {}
data['adminClientSecret'] = config.uaaAdminSecret
#cross-os compatibility requires json to be in a file
with open(uaa_payload_filename, 'w') as outfile:
json.dump(data, outfile)
outfile.close()
uaaJsonrequest = "cf cs "+config.predixUaaService+" "+config.predixUaaServicePlan +" "+config.rmdUaaName+ " -c " + os.getcwd()+'/'+uaa_payload_filename
createService(config.rmdUaaName,uaaJsonrequest)
def getVcapJsonForPredixBoot (config):
print("cf env " + config.predixbootAppName)
predixBootEnv = subprocess.check_output(["cf", "env" ,config.predixbootAppName])
systemProvidedVars=predixBootEnv.decode('utf-8').split('System-Provided:')[1].split('No user-defined env variables have been set')[0]
config.formattedJson = "[" + systemProvidedVars.replace("\n","").replace("'","").replace("}{","},{") + "]"
#print ("formattedJson=" + config.formattedJson)
def addUAAUser(config, userId , password, email,adminToken):
createUserBody = {"userName":"","password":"","emails":[{"value":""}]}
createUserBody["userName"] = userId
createUserBody["password"] = password
createUserBody["emails"][0]['value'] = email
createUserBodyStr = json.dumps(createUserBody)
print(createUserBodyStr)
statementStatusJson = invokeURLJsonResponse(config.UAA_URI+"/Users", {"Content-Type": "application/json", "Authorization": adminToken}, createUserBodyStr, "")
if statementStatusJson.get('error'):
statementStatus = statementStatusJson['error']
statementStatusDesc = statementStatusJson['error_description']
else :
statementStatus = 'success'
#statementStatusDesc = statementStatusJson['id']
    if statementStatus == 'success' or 'scim_resource_already_exists' in statementStatusDesc :
        print(userId + " user created (or already present) in UAA")
    else :
        sys.exit("Error adding Users "+statementStatusDesc )
def invokeURLJsonResponse(url, headers, data, method):
responseCode = invokeURL(url, headers, data, method)
return json.loads(open("json_output.txt").read())
def invokeURL(url, headers1, data, method):
request = Request(url, headers=headers1)
if method :
request.get_method=lambda: method
print ("Invoking URL ----" + request.get_full_url())
print ("\tmethod ----" + request.get_method())
print ("\t" + str(request.header_items()))
print ("\tInput data=" + str(data))
responseCode = 0
try:
if data :
result = urlopen(request, data.encode('utf-8'))
else :
result = urlopen(request)
print (request.data)
with open("json_output.txt", "wb") as local_file:
local_file.write(result.read())
print ("\t*******OUTPUT**********" + open("json_output.txt").read())
responseCode = result.getcode()
print ("\tRESPONSE=" + str(responseCode))
print ("\t" + str(result.info()))
    except HTTPError as err:
        if err.code == 409:
            e = sys.exc_info()[0]
            print( "Resource found - continue: %s" % e)
            with open("json_output.txt", "wt") as local_file:
                local_file.write(json.dumps({'error': 'Resource found - continue','errorCode': err.code,'error_description':'Resource found - continue'}))
            print ("\t*******OUTPUT**********" + open("json_output.txt").read())
            responseCode = err.code
        elif err.code == 404:
            e = sys.exc_info()[0]
            print( "Resource not found - continue with create: %s" % e)
            with open("json_output.txt", "wt") as local_file:
                local_file.write(json.dumps({'error': 'Resource not found - continue','errorCode': err.code,'error_description':'Resource not found - continue'}))
            print ("\t*******OUTPUT**********" + open("json_output.txt").read())
            responseCode = err.code
        else :
            e = sys.exc_info()[0]
            print( "Error: %s" % e)
            e = sys.exc_info()[1]
            print( "Error: %s" % e)
            sys.exit()
    except URLError as err:
        # HTTPError is a subclass of URLError, so it must be caught first above;
        # plain URLErrors (e.g. connection failures) carry no HTTP status code.
        e = sys.exc_info()[1]
        print( "Error: %s" % e)
        sys.exit()
print ("\tInvoking URL Complete----" + request.get_full_url())
print ("\tInvoking URL Complete with response code" + str(responseCode))
return responseCode
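# Hedged example: invokeURLJsonResponse() wraps invokeURL() and returns the
# JSON written to json_output.txt as a dict. A typical read-only call (URL and
# token below are placeholders) looks like:
#
#   clientJson = invokeURLJsonResponse("https://uaa.example.org/oauth/clients/my-client",
#                                      {"Content-Type": "application/json",
#                                       "Authorization": adminToken}, "", "GET")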
def createClientIdAndAddUser(config):
# setup the UAA login
adminToken = processUAAClientId(config,config.UAA_URI+"/oauth/clients","POST")
# Add users
print("****************** Adding users ******************")
addUAAUser(config, config.rmdUser1 , config.rmdUser1Pass, config.rmdUser1 + "@gegrctest.com",adminToken)
addUAAUser(config, config.rmdAdmin1 , config.rmdAdmin1Pass, config.rmdAdmin1 + "@gegrctest.com",adminToken)
def createBindPredixACSService(config, rmdAcsName):
acs_payload_filename = 'acs_payload.json'
data = {}
data['trustedIssuerIds'] = config.uaaIssuerId
with open(acs_payload_filename, 'w') as outfile:
json.dump(data, outfile)
outfile.close()
    #create ACS instance
acsJsonrequest = "cf cs "+config.predixAcsService+" "+config.predixAcsServicePlan +" "+rmdAcsName+ " -c "+ os.getcwd()+'/'+ acs_payload_filename
print(acsJsonrequest)
statementStatus = subprocess.call(acsJsonrequest, shell=True)
if statementStatus == 1 :
sys.exit("Error creating a uaa service instance")
statementStatus = subprocess.call("cf bs "+config.predixbootAppName +" " + rmdAcsName , shell=True)
if statementStatus == 1 :
sys.exit("Error binding a uaa service instance to boot ")
return statementStatus
def createGroup(config, adminToken,policyGrp):
print("****************** Add Group ******************")
createGroupBody = {"displayName":""}
createGroupBody["displayName"] = policyGrp
createGroupBodyStr = json.dumps(createGroupBody)
print(createGroupBodyStr)
statementStatusJson = invokeURLJsonResponse(config.UAA_URI+"/Groups", {"Content-Type": "application/json", "Authorization": adminToken}, createGroupBodyStr, "")
if statementStatusJson.get('error'):
statementStatus = statementStatusJson['error']
statementStatusDesc = statementStatusJson['error_description']
else :
statementStatus = 'success'
statementStatusDesc = 'success'
    if statementStatus == 'success' or 'scim_resource_exists' in statementStatusDesc :
print("Success creating or reusing the Group")
else :
sys.exit("Error Processing Adding Group on UAA "+statementStatusDesc )
def getGroupOrUserByDisplayName(uri, adminToken):
getResponseJson=invokeURLJsonResponse(uri, {"Content-Type": "application/json", "Authorization": adminToken}, "", "")
found = True
statementStatus = 'success'
    if not getResponseJson.get('totalResults') :
statementStatus = 'not found'
found = False
return found, getResponseJson
def getGroup(config, adminToken ,grpname):
return getGroupOrUserByDisplayName(config.UAA_URI+ "/Groups/?filter=displayName+eq+%22" + grpname + "%22&startIndex=1", adminToken)
def getUserbyDisplayName(config, adminToken ,username):
return getGroupOrUserByDisplayName(config.UAA_URI+ "/Users/?attributes=id%2CuserName&filter=userName+eq+%22" + username + "%22&startIndex=1", adminToken)
def addAdminUserPolicyGroup(config, policyGrp,userName):
adminToken = getTokenFromUAA(config, 1)
if not adminToken :
sys.exit("Error getting admin token from the UAA instance ")
#check Get Group
groupFound,groupJson = getGroup(config, adminToken,policyGrp)
if not groupFound :
createGroup(config,adminToken,policyGrp)
groupFound,groupJson = getGroup(config, adminToken,policyGrp)
userFound,userJson = getUserbyDisplayName(config,adminToken,userName)
if not userFound :
sys.exit(" User is not found in the UAA - error adding member to the group")
members = []
if groupJson.get('resources') :
grpName = groupJson['resources'][0]
if grpName.get('members') :
groupMeberList = grpName.get('members')
for groupMeber in groupMeberList:
members.insert(0 ,groupMeber['value'])
members.insert(0, userJson['resources'][0]['id'])
    print ('Members to be updated for the Group: ' + ','.join(members))
#update Group
groupId = groupJson['resources'][0]['id']
updateGroupBody = { "meta": {}, "schemas": [],"members": [],"id": "","displayName": ""}
updateGroupBody["meta"] = groupJson['resources'][0]['meta']
updateGroupBody["members"] = members
updateGroupBody["displayName"] = groupJson['resources'][0]['displayName']
updateGroupBody["schemas"] = groupJson['resources'][0]['schemas']
updateGroupBody["id"] = groupId
updateGroupBodyStr = json.dumps(updateGroupBody)
uuaGroupURL = config.UAA_URI + "/Groups/"+groupId
statementStatusJson = invokeURLJsonResponse(uuaGroupURL, {"Content-Type": "application/json", "Authorization": "%s" %adminToken, "if-match" : "*", "accept" : "application/json"}, updateGroupBodyStr, "PUT")
if statementStatusJson.get('error'):
statementStatus = statementStatusJson['error']
statementStatusDesc = statementStatusJson['error_description']
else :
statementStatus = 'success'
statementStatusDesc = 'success'
    if statementStatus == 'success' or 'Client already exists' in statementStatusDesc :
        print ("Successfully added " + userName + " to the group " + policyGrp)
else :
sys.exit("Error adding " +userName + " to the group "+policyGrp + " statementStatusDesc=" + statementStatusDesc )
def updateUserACS(config):
addAdminUserPolicyGroup(config, "acs.policies.read",config.rmdAdmin1)
addAdminUserPolicyGroup(config, "acs.policies.write",config.rmdAdmin1)
addAdminUserPolicyGroup(config, "acs.attributes.read",config.rmdAdmin1)
addAdminUserPolicyGroup(config, "acs.attributes.write",config.rmdAdmin1)
addAdminUserPolicyGroup(config, "acs.policies.read",config.rmdUser1)
addAdminUserPolicyGroup(config, "acs.attributes.read",config.rmdUser1)
def processUAAClientId (config,uuaClientURL,method):
adminToken = getTokenFromUAA(config, 1)
if not adminToken :
sys.exit("Error getting admin token from the UAA instance ")
print(config.clientScope)
print(config.clientScopeList)
createClientIdBody = {"client_id":"","client_secret":"","scope":[],"authorized_grant_types":[],"authorities":[],"autoapprove":["openid"]}
createClientIdBody["client_id"] = config.rmdAppClientId
createClientIdBody["client_secret"] = config.rmdAppSecret
createClientIdBody["scope"] = config.clientScopeList
createClientIdBody["authorized_grant_types"] = config.clientGrantType
createClientIdBody["authorities"] = config.clientAuthoritiesList
createClientIdBodyStr = json.dumps(createClientIdBody)
print("****************** Creating client id ******************")
# check if the client exists
uaaClientResponseJson = invokeURLJsonResponse(config.UAA_URI+"/oauth/clients/"+config.rmdAppClientId, {"Content-Type": "application/json", "Authorization": adminToken}, '', 'GET')
print("reponse from get client "+str(uaaClientResponseJson))
if uaaClientResponseJson.get('error'):
        # failure since client does not exist, create the client
uaaClientResponseJson = invokeURLJsonResponse(uuaClientURL, {"Content-Type": "application/json", "Authorization": adminToken}, createClientIdBodyStr, method)
if uaaClientResponseJson.get('error'):
statementStatus = uaaClientResponseJson['error']
statementStatusDesc = uaaClientResponseJson['error_description']
else :
statementStatus = 'success'
statementStatusDesc = 'success'
else :
statementStatus = 'success'
statementStatusDesc = 'success'
if statementStatus == 'success' or 'Client already exists' in statementStatusDesc :
print("Success creating or reusing the Client Id")
# setting client details on config
config.clientScopeList=uaaClientResponseJson.get('scope')
config.clientGrantType=uaaClientResponseJson.get('authorized_grant_types')
config.clientAuthoritiesList=uaaClientResponseJson.get('authorities')
else :
sys.exit("Error Processing ClientId on UAA "+statementStatusDesc )
return adminToken
def updateClientIdAuthorities(config):
adminToken = getTokenFromUAA(config, 1)
if not adminToken :
sys.exit("Error getting admin token from the UAA instance ")
print(config.clientScope)
print(config.clientScopeList)
createClientIdBody = {"client_id":"","client_secret":"","scope":[],"authorized_grant_types":[],"authorities":[],"autoapprove":["openid"]}
createClientIdBody["client_id"] = config.rmdAppClientId
createClientIdBody["client_secret"] = config.rmdAppSecret
createClientIdBody["scope"] = config.clientScopeList
createClientIdBody["authorized_grant_types"] = config.clientGrantType
createClientIdBody["authorities"] = config.clientAuthoritiesList
createClientIdBodyStr = json.dumps(createClientIdBody)
print("****************** Updating client id ******************")
uaaClientResponseJson = invokeURLJsonResponse(config.UAA_URI+"/oauth/clients/"+config.rmdAppClientId, {"Content-Type": "application/json", "Authorization": adminToken}, createClientIdBodyStr, "PUT")
if uaaClientResponseJson.get('error'):
statementStatus = uaaClientResponseJson['error']
statementStatusDesc = uaaClientResponseJson['error_description']
else :
statementStatus = 'success'
statementStatusDesc = 'success'
#processUAAClientId(config,config.UAA_URI+"/oauth/clients/"+config.rmdAppClientId,"PUT")
def getTokenFromUAA(config, isAdmin):
realmStr=""
if isAdmin == 1:
realmStr = "admin:"+config.uaaAdminSecret
else :
realmStr = config.rmdAppClientId+":"+config.rmdAppSecret
authKey = base64.b64encode(bytearray(realmStr, 'UTF-8')).decode("ascii")
queryClientCreds= "grant_type=client_credentials"
getClientTokenResponseJson=invokeURLJsonResponse(config.uaaIssuerId + "?" + queryClientCreds, {"Content-Type": "application/x-www-form-urlencoded", "Authorization": "Basic %s" % authKey}, "", "")
print("Client Token is "+getClientTokenResponseJson['token_type']+" "+getClientTokenResponseJson['access_token'])
return (getClientTokenResponseJson['token_type']+" "+getClientTokenResponseJson['access_token'])
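# Hedged illustration: getTokenFromUAA() performs an OAuth2 client_credentials
# grant against the UAA token endpoint (config.uaaIssuerId) using HTTP Basic
# auth, roughly equivalent to:
#
#   curl -u admin:<uaaAdminSecret> "<issuerId>?grant_type=client_credentials"
#
# and returns "<token_type> <access_token>" ready for an Authorization header.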
def createRefAppACSPolicyAndSubject(config,acs_zone_header):
adminUserTOken = getTokenFromUAA(config, 0)
acsJsonResponse = invokeURLJsonResponse(config.ACS_URI+'/v1/policy-set/'+config.acsPolicyName, {"Content-Type": "application/json", "Authorization": "%s" %adminUserTOken, "Predix-Zone-Id" : "%s" %acs_zone_header},"", "GET")
print("ACS JSON Response"+str(acsJsonResponse))
if acsJsonResponse.get('error'):
statementStatusDesc = acsJsonResponse['error_description']
statementStatus = 'not-found'
else :
statementStatus = 'success'
if('not-found' == statementStatus):
invokeURL(config.ACS_URI+'/v1/policy-set/'+config.acsPolicyName, {"Content-Type": "application/json", "Authorization": "%s" %adminUserTOken, "Predix-Zone-Id" : "%s" %acs_zone_header}, open("./acs/rmd_app_policy.json").read(), "PUT")
#acsSubjectCurl = 'curl -X PUT "'+config.ACS_URI+'/v1/subject/' + config.rmdAdmin1 + '"' + ' -d "@./acs/' + config.rmdAdmin1 + '_role_attribute.json"'+headers
invokeURL(config.ACS_URI+'/v1/subject/' + config.rmdAdmin1, {"Content-Type": "application/json", "Authorization": "%s" %adminUserTOken, "Predix-Zone-Id" : "%s" %acs_zone_header}, open("./acs/" + config.rmdAdmin1 + "_role_attribute.json").read(), "PUT")
#acsSubjectCurl = 'curl -X PUT "'+config.ACS_URI+'/v1/subject/' + config.rmdUser1 + '"' + ' -d "@./acs/"' + config.rmdUser1 + '"_role_attribute.json"'+headers
invokeURL(config.ACS_URI+'/v1/subject/' + config.rmdUser1, {"Content-Type": "application/json", "Authorization": "%s" %adminUserTOken, "Predix-Zone-Id" : "%s" %acs_zone_header}, open("./acs/" + config.rmdUser1+ "_role_attribute.json").read(), "PUT")
def createAsssetInstance(config,rmdPredixAssetName ,predixAssetName ):
getPredixUAAConfigfromVcaps(config)
asset_payload_filename = 'asset_payload.json'
uaaList = [config.uaaIssuerId]
data = {}
data['trustedIssuerIds'] = uaaList
with open(asset_payload_filename, 'w') as outfile:
json.dump(data, outfile)
print(data)
outfile.close()
request = "cf cs "+predixAssetName+" "+config.predixAssetServicePlan +" "+rmdPredixAssetName+ " -c "+os.getcwd()+'/' +asset_payload_filename
print ("Creating Service cmd "+request)
statementStatus = subprocess.call(request, shell=True)
#if statementStatus == 1 :
#sys.exit("Error creating a assset service instance")
def createTimeSeriesInstance(config,rmdPredixTimeSeriesName,predixTimeSeriesName):
timeSeries_payload_filename = 'timeseries_payload.json'
uaaList = [config.uaaIssuerId]
data = {}
data['trustedIssuerIds'] =uaaList
with open(timeSeries_payload_filename, 'w') as outfile:
json.dump(data, outfile)
outfile.close()
tsJsonrequest = "cf cs "+predixTimeSeriesName+" "+config.predixTimeSeriesServicePlan +" "+rmdPredixTimeSeriesName+ " -c "+os.getcwd()+'/'+timeSeries_payload_filename
print ("Creating Service cmd "+tsJsonrequest)
statementStatus = subprocess.call(tsJsonrequest, shell=True)
if statementStatus == 1 :
sys.exit("Error creating a assset service instance")
def createAnalyticsRuntimeInstance(config,rmdPredixAnalyticsRuntime, predixAnalyticsRuntime):
print("Creating Analytics runtime instance..")
getPredixUAAConfigfromVcaps(config)
asset_payload_filename = 'asset_payload.json'
uaaList = [config.uaaIssuerId]
data = {}
data['trustedIssuerIds'] = uaaList
with open(asset_payload_filename, 'w') as outfile:
json.dump(data, outfile)
print(data)
outfile.close()
request = "cf cs "+predixAnalyticsRuntime+" "+config.predixAnalyticsRuntimePlan +" "+rmdPredixAnalyticsRuntime+ " -c "+os.getcwd()+'/' +asset_payload_filename
print ("Creating Service cmd "+request)
statementStatus = subprocess.call(request, shell=True)
#if statementStatus == 1 :
#sys.exit("Error creating a assset service instance")
def createAnalyticsCatalogInstance(config,rmdPredixAnalyticsCatalog, predixAnalyticsCatalog):
print("Creating Analytics catalog instance..")
getPredixUAAConfigfromVcaps(config)
asset_payload_filename = 'asset_payload.json'
uaaList = [config.uaaIssuerId]
data = {}
data['trustedIssuerIds'] = uaaList
with open(asset_payload_filename, 'w') as outfile:
json.dump(data, outfile)
print(data)
outfile.close()
request = "cf cs "+predixAnalyticsCatalog+" "+config.predixAnalyticsCatalogPlan +" "+rmdPredixAnalyticsCatalog+ " -c "+os.getcwd()+'/' +asset_payload_filename
print ("Creating Service cmd "+request)
statementStatus = subprocess.call(request, shell=True)
#if statementStatus == 1 :
#sys.exit("Error creating a assset service instance")
def createRabbitMQInstance(config):
print("Creating Rabbit MQ instance..")
request = "cf cs "+config.predixRabbitMQ+" "+config.predixRabbitMQPlan +" "+config.rmdRabbitMQ
print ("Creating Service cmd "+request)
statementStatus = subprocess.call(request, shell=True)
#if statementStatus == 1 :
#sys.exit("Error creating a assset service instance")
def getPredixUAAConfigfromVcaps(config):
if not hasattr(config,'uaaIssuerId') :
getVcapJsonForPredixBoot(config)
d = json.loads(config.formattedJson)
config.uaaIssuerId = d[0]['VCAP_SERVICES'][config.predixUaaService][0]['credentials']['issuerId']
config.UAA_URI = d[0]['VCAP_SERVICES'][config.predixUaaService][0]['credentials']['uri']
uaaZoneHttpHeaderName = d[0]['VCAP_SERVICES'][config.predixUaaService][0]['credentials']['zone']['http-header-name']
uaaZoneHttpHeaderValue = d[0]['VCAP_SERVICES'][config.predixUaaService][0]['credentials']['zone']['http-header-value']
print("****************** UAA configured As ******************")
print ("\n uaaIssuerId = " + config.uaaIssuerId + "\n UAA_URI = " + config.UAA_URI + "\n "+uaaZoneHttpHeaderName+" = " +uaaZoneHttpHeaderValue+"\n")
print("****************** ***************** ******************")
def getPredixACSConfigfromVcaps(config):
if not hasattr(config,'ACS_URI') :
getVcapJsonForPredixBoot(config)
d = json.loads(config.formattedJson)
config.ACS_URI = d[0]['VCAP_SERVICES'][config.predixAcsService][0]['credentials']['uri']
config.acsPredixZoneHeaderName = d[0]['VCAP_SERVICES'][config.predixAcsService][0]['credentials']['zone']['http-header-name']
config.acsPredixZoneHeaderValue = d[0]['VCAP_SERVICES'][config.predixAcsService][0]['credentials']['zone']['http-header-value']
config.acsOauthScope = d[0]['VCAP_SERVICES'][config.predixAcsService][0]['credentials']['zone']['oauth-scope']
def bindService(applicationName , rmdServiceInstanceName):
statementStatus = subprocess.call("cf bs "+applicationName +" " + rmdServiceInstanceName , shell=True)
if statementStatus == 1 :
sys.exit("Error binding a "+rmdServiceInstanceName+" service instance to boot ")
def restageApplication(applicationName):
statementStatus = subprocess.call("cf restage "+applicationName, shell=True)
if statementStatus == 1 :
sys.exit("Error restaging a uaa service instance to boot")
def getAnalyticsRuntimeURLandZone(config):
if not hasattr(config,'ANALYTICRUNTIME_ZONE') :
print("parsing analytics runtime zone and uri from vcap")
analyticsRuntimeUri = ''
analyticsRuntimeZone = ''
d = json.loads(config.formattedJson)
analyticsRuntimeZone = d[0]['VCAP_SERVICES'][config.predixAnalyticsRuntime][0]['credentials']['zone-http-header-value']
analyticsRuntimeUri = d[0]['VCAP_SERVICES'][config.predixAnalyticsRuntime][0]['credentials']['execution_uri']
if "https" in analyticsRuntimeUri:
config.ANALYTICRUNTIME_URI = analyticsRuntimeUri.split('https://')[1].strip()
else :
config.ANALYTICRUNTIME_URI = analyticsRuntimeUri.split('http://')[1].strip()
config.ANALYTICRUNTIME_ZONE = analyticsRuntimeZone
def getAnalyticsCatalogURLandZone(config):
if not hasattr(config,'CATALOG_ZONE') :
catalogUri = ''
catalogZone = ''
d = json.loads(config.formattedJson)
catalogZone = d[0]['VCAP_SERVICES'][config.predixAnalyticsCatalog][0]['credentials']['zone-http-header-value']
catalogUri = d[0]['VCAP_SERVICES'][config.predixAnalyticsCatalog][0]['credentials']['catalog_uri']
if "https" in catalogUri:
config.CATALOG_URI = catalogUri.split('https://')[1].strip()
else :
config.CATALOG_URI = catalogUri.split('http://')[1].strip()
config.CATALOG_ZONE = catalogZone
def getAssetURLandZone(config):
if not hasattr(config,'ASSET_ZONE') :
assetUrl = ''
assetZone =''
d = json.loads(config.formattedJson)
assetZone = d[0]['VCAP_SERVICES'][config.predixAssetService][0]['credentials']['instanceId']
assetUrl = d[0]['VCAP_SERVICES'][config.predixAssetService][0]['credentials']['uri']
config.ASSET_ZONE = assetZone
config.ASSET_URI = assetUrl
def getTimeseriesURLandZone(config):
if not hasattr(config,'TS_ZONE') :
timeseriesUrl = ''
timeseriesZone =''
d = json.loads(config.formattedJson)
timeseriesZone = d[0]['VCAP_SERVICES'][config.predixTimeSeriesService][0]['credentials']['query']['zone-http-header-value']
timeseriesUrl = d[0]['VCAP_SERVICES'][config.predixTimeSeriesService][0]['credentials']['query']['uri']
config.TS_ZONE = timeseriesZone
config.TS_URI = timeseriesUrl
def getClientAuthoritiesforAssetAndTimeSeriesService(config):
d = json.loads(config.formattedJson)
config.assetScopes = config.predixAssetService+".zones."+d[0]['VCAP_SERVICES'][config.predixAssetService][0]['credentials']['instanceId']+".user"
#get Ingest authorities
tsInjest = d[0]['VCAP_SERVICES'][config.predixTimeSeriesService][0]['credentials']['ingest']
config.timeSeriesInjestScopes = tsInjest['zone-token-scopes'][0] +"," + tsInjest['zone-token-scopes'][1]
# get query authorities
tsQuery = d[0]['VCAP_SERVICES'][config.predixTimeSeriesService][0]['credentials']['query']
config.timeSeriesQueryScopes = tsQuery['zone-token-scopes'][0] +"," + tsQuery['zone-token-scopes'][1]
if hasattr(config,'ANALYTICRUNTIME_ZONE') :
config.analyticRuntimeScopes = "analytics.zones." + config.ANALYTICRUNTIME_ZONE + ".user"
#config.catalogScopes = "analytics.zones." + config.CATALOG_ZONE + ".user"
config.clientAuthoritiesList.append(config.assetScopes)
config.clientAuthoritiesList.append(config.timeSeriesInjestScopes)
config.clientAuthoritiesList.append(config.timeSeriesQueryScopes)
if hasattr(config,'analyticRuntimeScopes') :
config.clientAuthoritiesList.append(config.analyticRuntimeScopes)
#config.clientAuthoritiesList.append(config.catalogScopes)
config.clientScopeList.append(config.assetScopes)
config.clientScopeList.append(config.timeSeriesInjestScopes)
config.clientScopeList.append(config.timeSeriesQueryScopes)
if hasattr(config,'analyticRuntimeScopes') :
config.clientScopeList.append(config.analyticRuntimeScopes)
#config.clientScopeList.append(config.catalogScopes)
print ("returning timeseries client zone scopes query -->"+config.timeSeriesQueryScopes + " timeSeriesInjestAuthorities -->"+config.timeSeriesInjestScopes )
def updateUAAUserGroups(config, serviceGroups):
groups = serviceGroups.split(",")
#print (groups)
for group in groups:
#print (group)
        addAdminUserPolicyGroup(config, group,config.rmdAdmin1)
        addAdminUserPolicyGroup(config, group,config.rmdUser1)
def findRedisService(config):
#setup Redis
result = []
process = subprocess.Popen('cf m',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE )
for line in process.stdout:
result.append(line)
errcode = process.returncode
#print (errcode)
search_redis = config.predixRedis
for line in result:
line1 = line.decode('utf-8')
if(line1.find(search_redis) > -1):
#print(line)
config.predixRedis = line1.split()[0].strip()
print ("Setting Redis config.predixRedis as ")
print (config.predixRedis)
def getAuthorities(config):
if not hasattr(config,'clientAuthoritiesList') :
config.clientAuthoritiesList = list(config.clientAuthorities)
config.clientScopeList = list(config.clientScope)
def updateClientAuthoritiesACS(config):
getPredixACSConfigfromVcaps(config)
# get ACS scopes
config.clientAuthoritiesList.append(config.acsOauthScope)
config.clientScopeList.append(config.acsOauthScope)
    # merge with existing client
config.clientAuthoritiesList = config.clientAuthorities + list(set(config.clientAuthoritiesList) - set(config.clientAuthorities))
config.clientScopeList = config.clientScope + list(set(config.clientScopeList) - set(config.clientScope))
| {
"content_hash": "f6775e67c1fd302e6338fddd2074466f",
"timestamp": "",
"source": "github",
"line_count": 968,
"max_line_length": 253,
"avg_line_length": 41.836776859504134,
"alnum_prop": 0.708430045928194,
"repo_name": "emanuelbadaceanu/Thoth",
"id": "a4b540427bb311a2df6ada85c647cafe519d2fed",
"size": "40498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "predix machine edison/predix scripts/python/predix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1704"
},
{
"name": "HTML",
"bytes": "17136"
},
{
"name": "Java",
"bytes": "34130"
},
{
"name": "JavaScript",
"bytes": "73707"
},
{
"name": "Python",
"bytes": "40498"
},
{
"name": "Shell",
"bytes": "184256"
}
],
"symlink_target": ""
} |
import datetime
import sys
import os
# Add flask_store to the Path
root = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
)
)
sys.path.append(root)
now = datetime.datetime.utcnow()
year = now.year
version = open(os.path.join(root, 'VERSION')).read().splitlines()[0]
import flask_store # noqa
# Project details
project = u'Flask-Store'
copyright = u'{0}, Soon London Ltd'.format(year)
version = version
release = version
# Sphinx Config
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinxcontrib.napoleon']
exclude_patterns = []
# Theme
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes', ]
html_static_path = ['_static', ]
html_theme = 'kr'
html_sidebars = {
'index': ['sidebar_intro.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html'],
'**': ['sidebar_intro.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
| {
"content_hash": "d20ddf0d93326f79abd42d298685a64f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 73,
"avg_line_length": 22.78,
"alnum_prop": 0.6321334503950834,
"repo_name": "spacedogXYZ/Flask-Store",
"id": "ac77ccf502dd694bbfe5e380276dc9f8c3e6d5ee",
"size": "1164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "892"
},
{
"name": "Python",
"bytes": "33333"
}
],
"symlink_target": ""
} |
"""
Program to test hdf5 functionality.
It works.
"""
import numpy as np
import h5py
import matplotlib.pyplot as plt
import subprocess, os
def writeHdf5(filename):
f=h5py.File(filename, "w")
sampleValues= np.array([1.1,17.0,6.2])
f["sampleValues"]= sampleValues
f.close()
def dumpHdf5(filename):
"""Method to execute xyce"""
h5dumpBin = "/usr/local/bin/h5dump"
h5dumpCmd = [h5dumpBin, filename]
thisEnv = os.environ.copy()
proc= subprocess.Popen(h5dumpCmd, env=thisEnv)
return proc
def readHdf5(filename):
f=h5py.File(filename, "r")
data= f['sampleValues']
print "sampleValues"
for v in data:
print v
print "Done"
f.close()
def main():
filename= "hdf5ex_dat.hdf5"
writeHdf5(filename)
proc= dumpHdf5(filename)
proc.wait()
readHdf5(filename)
if __name__ == "__main__":
    main()
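# Hedged alternative (not used above): h5py files also support the context
# manager protocol, which guarantees the file is closed even on error:
#
#   with h5py.File(filename, "r") as f:
#       print(list(f["sampleValues"]))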
| {
"content_hash": "f6fda5ea9a387494f3a7b2272a7e9b48",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 48,
"avg_line_length": 18.674418604651162,
"alnum_prop": 0.6911581569115816,
"repo_name": "tomacorp/thermapythia",
"id": "e2ec15e296eb0681b55c9ccf6b051b48b91db58f",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thermpy/hdf5ex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "5464"
},
{
"name": "JavaScript",
"bytes": "9020"
},
{
"name": "Python",
"bytes": "403634"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
import script_chdir
from evaluation.eval_script import evaluate_models_xval, print_results, EvalModel
from hybrid_model.dataset import get_dataset
# Get dataset
dataset = get_dataset('ml100k')
# dataset = get_dataset('ml1m')
models = []
# Hybrid Model
from hybrid_model.hybrid import HybridModel
from hybrid_model.config import hybrid_config
model_type = HybridModel
config = hybrid_config
models.append(EvalModel(model_type.__name__, model_type, config))
results = evaluate_models_xval(dataset, models, coldstart=False, n_fold=10)
print('Normal 10-fold')
print_results(results)
"""
Normal 10-fold
------- HybridModel
=== Part full
rmse: 0.8908 ± 0.0044 prec@5: 0.8756 ± 0.0035
------- HybridModel_SVDpp
=== Part full
rmse: 0.8953 ± 0.0048 prec@5: 0.8747 ± 0.0031
------- HybridModel_AttributeBiasExperimental
=== Part full
rmse: 0.9235 ± 0.0041 prec@5: 0.8633 ± 0.0043
"""
| {
"content_hash": "68a889d4c31b4d6f28890567d7256f23",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.7231638418079096,
"repo_name": "sbremer/hybrid_rs",
"id": "67a3c37e2e839c6b169426079a37258bf35dc37c",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/test_10fold.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "126956"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from disco.bot.command import CommandError
UNITS = {
's': lambda v: v,
'm': lambda v: v * 60,
'h': lambda v: v * 60 * 60,
'd': lambda v: v * 60 * 60 * 24,
'w': lambda v: v * 60 * 60 * 24 * 7,
}
def parse_duration(raw, source=None, negative=False, safe=False):
if not raw:
if safe:
return None
raise CommandError('Invalid duration')
value = 0
digits = ''
for char in raw:
if char.isdigit():
digits += char
continue
if char not in UNITS or not digits:
if safe:
return None
raise CommandError('Invalid duration')
value += UNITS[char](int(digits))
digits = ''
if negative:
value = value * -1
return (source or datetime.utcnow()) + timedelta(seconds=value + 1)
def humanize_duration(duration, format='full'):
now = datetime.utcnow()
if isinstance(duration, timedelta):
if duration.total_seconds() > 0:
            duration = datetime.utcnow() + duration
else:
duration = datetime.utcnow() - timedelta(seconds=duration.total_seconds())
diff_delta = duration - now
diff = int(diff_delta.total_seconds())
minutes, seconds = divmod(diff, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
units = [weeks, days, hours, minutes, seconds]
if format == 'full':
unit_strs = ['week', 'day', 'hour', 'minute', 'second']
elif format == 'short':
unit_strs = ['w', 'd', 'h', 'm', 's']
expires = []
for x in range(0, 5):
if units[x] == 0:
continue
else:
if format == 'short':
expires.append('{}{}'.format(units[x], unit_strs[x]))
elif units[x] > 1:
expires.append('{} {}s'.format(units[x], unit_strs[x]))
else:
expires.append('{} {}'.format(units[x], unit_strs[x]))
return ', '.join(expires)
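# Hedged usage sketch (illustrative values only):
#
#   parse_duration('1h30m')                      # utcnow() + 5401 seconds
#   humanize_duration(timedelta(seconds=3661))   # '1 hour, 1 minute, 1 second'
#   humanize_duration(timedelta(seconds=3661), format='short')   # '1h, 1m, 1s'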
| {
"content_hash": "0c88aae207d39d0478c020f66661674a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 86,
"avg_line_length": 27.986486486486488,
"alnum_prop": 0.5364558184451955,
"repo_name": "ThaTiemsz/jetski",
"id": "1c0f80997d12ddd8f7fd81893657ae289a1e5cb2",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rowboat/util/input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21353"
},
{
"name": "Dockerfile",
"bytes": "582"
},
{
"name": "HTML",
"bytes": "8328"
},
{
"name": "JavaScript",
"bytes": "84449"
},
{
"name": "Makefile",
"bytes": "323"
},
{
"name": "Python",
"bytes": "372586"
},
{
"name": "Shell",
"bytes": "660"
}
],
"symlink_target": ""
} |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas._libs.index as _index
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
from pandas.core.sparse.scipy_sparse import (
_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
return left._constructor(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
data = Series(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isna(data) and isna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
DEPRECATED: use the pd.SparseSeries(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.SparseSeries(..) "
"constructor instead.", FutureWarning, stacklevel=2)
return cls._from_array(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@classmethod
def _from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '{series}\n{index!r}'.format(series=series_rep,
index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self._set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
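    # Illustrative sketch (not part of the original source): `get` mirrors
    # dict.get semantics.
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> ss = pd.Series([1.0, np.nan], index=['a', 'b']).to_sparse()
    #   >>> ss.get('a')
    #   1.0
    #   >>> ss.get('z', default=-1)
    #   -1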
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
        # and possibly change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
_set_value.__doc__ = set_value.__doc__
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to a Series.
Parameters
----------
sparse_only: bool, default False
DEPRECATED: this argument will be removed in a future version.
            If True, return only the stored (non-fill) values; if False,
            return the dense version of `self.values`.
Returns
-------
s : Series
"""
if sparse_only:
warnings.warn(("The 'sparse_only' parameter has been deprecated "
"and will be removed in a future version."),
FutureWarning, stacklevel=2)
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
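    # Illustrative sketch (not part of the original source):
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> ss = pd.Series([1.0, np.nan, 3.0]).to_sparse()
    #   >>> ss.to_dense().tolist()
    #   [1.0, nan, 3.0]
    #   >>> ss.to_dense(sparse_only=True).tolist()  # deprecated: stored points only
    #   [1.0, 3.0]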
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
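        # Density is the fraction of index positions that are physically
        # stored, e.g. 2 stored points in a series of length 4 gives 0.5.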
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
return super(SparseSeries, self).reindex(index=index, method=method,
copy=copy, limit=limit,
**kwargs)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['take'])
def take(self, indices, axis=0, convert=None, *args, **kwargs):
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
convert = True
nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
        When performing the cumulative summation, any NA/null values are
        skipped. The resulting SparseSeries will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : {0}
Returns
-------
cumsum : SparseSeries
"""
nv.validate_cumsum(args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
new_array = self.values.cumsum()
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
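    # Illustrative sketch (not part of the original source): NaN locations
    # survive the scan and the running total skips them.
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> pd.Series([1.0, np.nan, 2.0]).to_sparse().cumsum().to_dense().tolist()
    #   [1.0, nan, 3.0]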
@Appender(generic._shared_docs['isna'])
def isna(self):
arr = SparseArray(isna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=isna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
isnull = isna
@Appender(generic._shared_docs['notna'])
def notna(self):
arr = SparseArray(notna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=notna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
notnull = notna
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().dropna()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isna(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods, freq=None, axis=0):
if periods == 0:
return self.copy()
# no special handling of fill values yet
if not isna(self.fill_value):
shifted = self.to_dense().shift(periods, freq=freq,
axis=axis)
return shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = _make_index(len(self), new_indices, self.sp_index)
arr = self.values._simple_new(self.sp_values[start:end].copy(),
new_sp_index, fill_value=np.nan)
return self._constructor(arr, index=self.index).__finalize__(self)
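    # Illustrative sketch (not part of the original source): with the default
    # NaN fill value, shifting moves the stored points and backfills with NaN.
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> pd.Series([1.0, np.nan, 3.0]).to_sparse().shift(1).to_dense().tolist()
    #   [nan, 1.0, nan]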
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
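    # Illustrative sketch (not part of the original source): the calling
    # series wins wherever it has a value; gaps are filled from `other`.
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> a = pd.Series([1.0, np.nan]).to_sparse()
    #   >>> b = pd.Series([0.0, 2.0]).to_sparse()
    #   >>> a.combine_first(b).to_dense().tolist()
    #   [1.0, 2.0]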
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> from numpy import nan
>>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
>>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
        --------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
comp_method=_arith_method,
bool_method=None, use_numexpr=False,
force=True)
| {
"content_hash": "8bb4f5c5ac6b4b0b47b8af1f22a09108",
"timestamp": "",
"source": "github",
"line_count": 869,
"max_line_length": 79,
"avg_line_length": 34.64326812428078,
"alnum_prop": 0.5387477163261917,
"repo_name": "zfrenchee/pandas",
"id": "8a38b1054a1f5cdb38c469a0c242459e7953f81b",
"size": "30105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/sparse/series.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "470171"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "989"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "12658422"
},
{
"name": "Shell",
"bytes": "25785"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import copy
import logging
import sys
import warnings
from django.conf import compat_patch_logging_config, LazySettings
from django.core import mail
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils.log import CallbackFilter, RequireDebugFalse
from django.utils.six import StringIO
from django.utils.unittest import skipUnless
from ..admin_scripts.tests import AdminScriptTestCase
PYVERS = sys.version_info[:2]
# logging config prior to using filter with mail_admins
OLD_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
class PatchLoggingConfigTest(TestCase):
"""
Tests for backward-compat shim for #16288. These tests should be removed in
Django 1.6 when that shim and DeprecationWarning are removed.
"""
def test_filter_added(self):
"""
Test that debug-false filter is added to mail_admins handler if it has
no filters.
"""
config = copy.deepcopy(OLD_LOGGING)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
compat_patch_logging_config(config)
self.assertEqual(len(w), 1)
self.assertEqual(
config["handlers"]["mail_admins"]["filters"],
['require_debug_false'])
def test_filter_configuration(self):
"""
Test that the auto-added require_debug_false filter is an instance of
`RequireDebugFalse` filter class.
"""
config = copy.deepcopy(OLD_LOGGING)
with warnings.catch_warnings(record=True):
compat_patch_logging_config(config)
flt = config["filters"]["require_debug_false"]
self.assertEqual(flt["()"], "django.utils.log.RequireDebugFalse")
def test_require_debug_false_filter(self):
"""
Test the RequireDebugFalse filter class.
"""
filter_ = RequireDebugFalse()
with self.settings(DEBUG=True):
self.assertEqual(filter_.filter("record is not used"), False)
with self.settings(DEBUG=False):
self.assertEqual(filter_.filter("record is not used"), True)
def test_no_patch_if_filters_key_exists(self):
"""
Test that the logging configuration is not modified if the mail_admins
handler already has a "filters" key.
"""
config = copy.deepcopy(OLD_LOGGING)
config["handlers"]["mail_admins"]["filters"] = []
new_config = copy.deepcopy(config)
compat_patch_logging_config(new_config)
self.assertEqual(config, new_config)
def test_no_patch_if_no_mail_admins_handler(self):
"""
Test that the logging configuration is not modified if the mail_admins
handler is not present.
"""
config = copy.deepcopy(OLD_LOGGING)
config["handlers"].pop("mail_admins")
new_config = copy.deepcopy(config)
compat_patch_logging_config(new_config)
self.assertEqual(config, new_config)
class DefaultLoggingTest(TestCase):
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
def test_django_logger(self):
"""
        The 'django' base logger only outputs anything when DEBUG=True.
"""
output = StringIO()
self.logger.handlers[0].stream = output
self.logger.error("Hey, this is an error.")
self.assertEqual(output.getvalue(), '')
with self.settings(DEBUG=True):
self.logger.error("Hey, this is an error.")
self.assertEqual(output.getvalue(), 'Hey, this is an error.\n')
@skipUnless(PYVERS > (2,6), "warnings captured only in Python >= 2.7")
class WarningLoggerTests(TestCase):
"""
Tests that warnings output for DeprecationWarnings is enabled
and captured to the logging system
"""
def setUp(self):
# this convoluted setup is to avoid printing this deprecation to
# stderr during test running - as the test runner forces deprecations
# to be displayed at the global py.warnings level
self.logger = logging.getLogger('py.warnings')
self.outputs = []
self.old_streams = []
for handler in self.logger.handlers:
self.old_streams.append(handler.stream)
self.outputs.append(StringIO())
handler.stream = self.outputs[-1]
def tearDown(self):
for i, handler in enumerate(self.logger.handlers):
self.logger.handlers[i].stream = self.old_streams[i]
@override_settings(DEBUG=True)
def test_warnings_capture(self):
warnings.warn('Foo Deprecated', DeprecationWarning)
output = force_text(self.outputs[0].getvalue())
self.assertTrue('Foo Deprecated' in output)
def test_warnings_capture_debug_false(self):
warnings.warn('Foo Deprecated', DeprecationWarning)
output = force_text(self.outputs[0].getvalue())
self.assertFalse('Foo Deprecated' in output)
class CallbackFilterTest(TestCase):
def test_sense(self):
f_false = CallbackFilter(lambda r: False)
f_true = CallbackFilter(lambda r: True)
self.assertEqual(f_false.filter("record"), False)
self.assertEqual(f_true.filter("record"), True)
def test_passes_on_record(self):
collector = []
def _callback(record):
collector.append(record)
return True
f = CallbackFilter(_callback)
f.filter("a record")
self.assertEqual(collector, ["a record"])
class AdminEmailHandlerTest(TestCase):
logger = logging.getLogger('django.request')
def get_admin_email_handler(self, logger):
        # Inspired by regressiontests/views/views.py: send_log()
# ensuring the AdminEmailHandler does not get filtered out
# even with DEBUG=True.
admin_email_handler = [
h for h in logger.handlers
if h.__class__.__name__ == "AdminEmailHandler"
][0]
return admin_email_handler
@override_settings(
ADMINS=(('whatever admin', 'admin@example.com'),),
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
)
def test_accepts_args(self):
"""
Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
setting are used to compose the email subject.
Refs #16736.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
self.logger.error(message, token1, token2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=(('whatever admin', 'admin@example.com'),),
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
INTERNAL_IPS=('127.0.0.1',),
)
def test_accepts_args_and_request(self):
"""
        Ensure that the subject is also handled correctly when a request
        object is passed.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
rf = RequestFactory()
request = rf.get('/')
self.logger.error(message, token1, token2,
extra={
'status_code': 403,
'request': request,
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=(('admin', 'admin@example.com'),),
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_subject_accepts_newlines(self):
"""
        Ensure that newlines in email reports' subjects are escaped so that
        AdminEmailHandler does not fail.
Refs #17281.
"""
message = 'Message \r\n with newlines'
expected_subject = 'ERROR: Message \\r\\n with newlines'
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertFalse('\n' in mail.outbox[0].subject)
self.assertFalse('\r' in mail.outbox[0].subject)
self.assertEqual(mail.outbox[0].subject, expected_subject)
@override_settings(
ADMINS=(('admin', 'admin@example.com'),),
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_truncate_subject(self):
"""
RFC 2822's hard limit is 998 characters per line.
So, minus "Subject: ", the actual subject must be no longer than 989
characters.
Refs #17281.
"""
message = 'a' * 1000
expected_subject = 'ERROR: aa' + 'a' * 980
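        # Worked arithmetic (comment added for clarity, not in the original
        # test): 998 - len('Subject: ') = 989 characters are allowed, and
        # 'ERROR: ' consumes 7 of them, so 982 of the 1000 'a's survive
        # ('ERROR: aa' + 'a' * 980 is 'ERROR: ' followed by 982 'a's).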
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, expected_subject)
class SettingsConfigTest(AdminScriptTestCase):
"""
Test that accessing settings in a custom logging handler does not trigger
a circular import error.
"""
def setUp(self):
log_config = """{
'version': 1,
'handlers': {
'custom_handler': {
'level': 'INFO',
'class': 'logging_tests.logconfig.MyHandler',
}
}
}"""
self.write_settings('settings.py', sdict={'LOGGING': log_config})
def tearDown(self):
self.remove_settings('settings.py')
def test_circular_dependency(self):
# validate is just an example command to trigger settings configuration
out, err = self.run_manage(['validate'])
self.assertNoOutput(err)
self.assertOutput(out, "0 errors found")
def dictConfig(config):
dictConfig.called = True
dictConfig.called = False
class SettingsConfigureLogging(TestCase):
"""
Test that calling settings.configure() initializes the logging
configuration.
"""
def test_configure_initializes_logging(self):
settings = LazySettings()
settings.configure(
LOGGING_CONFIG='regressiontests.logging_tests.tests.dictConfig')
self.assertTrue(dictConfig.called)
| {
"content_hash": "4759dff93a14d7ee44ec54535d019d01",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 116,
"avg_line_length": 32.812849162011176,
"alnum_prop": 0.6129224482846685,
"repo_name": "dvliman/jaikuengine",
"id": "a498510cfe5b8fb40d7bfa4f2b9b18aefd31d43c",
"size": "11747",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".google_appengine/lib/django-1.5/tests/regressiontests/logging_tests/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "407860"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "CSS",
"bytes": "330328"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "JavaScript",
"bytes": "742946"
},
{
"name": "PHP",
"bytes": "1583005"
},
{
"name": "Python",
"bytes": "50274702"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "39632"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tempo'
copyright = """2017, Eliot Berriot"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tempodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'tempo.tex',
'tempo Documentation',
"""Eliot Berriot""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tempo', 'tempo Documentation',
["""Eliot Berriot"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tempo', 'tempo Documentation',
"""Eliot Berriot""", 'tempo',
"""Your personal log""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| {
"content_hash": "c9677964d0f4e5a267602eba0fed6909",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 80,
"avg_line_length": 31.70940170940171,
"alnum_prop": 0.6902964959568734,
"repo_name": "EliotBerriot/tempo",
"id": "6308a53b36c51096809a6e9c809171535cb437b5",
"size": "7808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "595412"
},
{
"name": "HTML",
"bytes": "74777"
},
{
"name": "JavaScript",
"bytes": "906006"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "127936"
},
{
"name": "Shell",
"bytes": "8049"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
import relpath
import optparse
import jsshells
from collections import defaultdict
base_path = os.path.split(os.path.abspath(__file__))[0]
pass_file = os.path.join(base_path, "pass")
fail_file = os.path.join(base_path, "fail")
skip_file = os.path.join(base_path, "skip")
default_engine = "carakan"
def check_expected_files():
for fn in (pass_file, fail_file, skip_file):
if not os.path.exists(fn):
f = open(fn, "w")
f.close()
def read_test_file(file_obj):
out_set = set()
for line in file_obj:
line = line.strip()
if line:
if line.startswith("#"):
continue
out_set.add(line)
return out_set
def write_test_file(out_file, test_set):
out_data = list(test_set)
out_data.sort()
for item in out_data:
out_file.write(item + "\n")
def get_all_tests():
tests = set()
for dirpath, dirnames, filenames in os.walk(base_path):
if ".svn" in dirpath or "tools" in dirpath:
continue
for fn in filenames:
if os.path.splitext(fn)[1] != ".js" or fn[0] in ("#", "."):
continue
if fn not in ("mjsunit.js", "harness.js"):
full_path = os.path.join(dirpath, fn)
tests.add(relpath.relpath(full_path, base_path))
return tests
def run_tests(shell, tests, skip):
results = {}
for test in tests:
if test in skip:
continue
else:
result = run_test(shell, test)
results[test] = result
return results
def run_test(shell, test):
result = {"rv":None,
"output":None}
args = (os.path.join(base_path, "harness.js"),
os.path.join(base_path, "mjsunit.js"),
os.path.join(base_path, test))
rv, output = shell.runCommand(args)
result["rv"] = rv
result["output"] = output
return result
def categorise_results(results):
passes = set()
fails = set()
crashes = set()
for test, result in results.iteritems():
if result["rv"] == 0:
passes.add(test)
elif result["rv"] not in (0,1):
crashes.add(test)
else:
fails.add(test)
return passes, fails, crashes
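# Exit-code convention assumed throughout the harness: 0 means the test
# passed, 1 means an ordinary failure, and any other return value is treated
# as a crash.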
def print_test_result(test, result):
if result["rv"] == 0:
print test + ": PASS"
else:
print test + ": FAIL"
print "rv: %(rv)s\n%(output)s\n"%result
def print_results(pass_set, fail_set, crash_set, regressions, new_passes,
results, opts):
if regressions:
print "There were %i regressions"%len(regressions)
for test in regressions:
print test
else:
if crash_set:
print "%i tests crashed"%len(crash_set)
for test in crash_set:
print_test_result(test, results[test])
print "%i tests passed"%len(pass_set)
print "%i tests failed (but didn't crash)"%len(fail_set)
if opts.print_fail:
for test in fail_set:
print_test_result(test, results[test])
print "%i new passes"%len(new_passes)
def write_expected(pass_set, non_passing_set):
write_test_file(open(pass_file, "w"), pass_set)
write_test_file(open(fail_file, "w"), non_passing_set)
def get_options():
parser = optparse.OptionParser()
parser.add_option("--force-update", action="store_true",
default=False, dest="force",
help="Force update to the pass and fail files regardless of whatever else is going on")
parser.add_option("-s", action="store", dest="shell",
help="Path to shell")
parser.add_option("-e", action="store", dest="engine",
help="Path to shell")
parser.add_option("--fail", action="store_true",
default=False, dest="print_fail",
help="Print details of all failing tests")
return parser
def run_suite(shell, opts, args):
expected_pass = read_test_file(open(pass_file))
expected_fail = read_test_file(open(fail_file))
skip_tests = read_test_file(open(skip_file))
all_tests = get_all_tests()
results = run_tests(shell, all_tests, skip_tests)
pass_set, fail_set, crash_set = categorise_results(results)
non_passing_set = fail_set | crash_set
regressions = non_passing_set - expected_fail
new_passes = pass_set - expected_pass
print_results(pass_set, fail_set, crash_set, regressions, new_passes,
results, opts)
if len(regressions) == 0 or opts.force:
write_expected(pass_set, non_passing_set)
def run_individual_tests(shell, opts, args):
results = {}
for test in args:
result = run_test(shell, test)
results[test] = result
    for test in results:
print_test_result(test, results[test])
def main():
opts, args = get_options().parse_args()
check_expected_files()
if opts.engine:
engine_name = opts.engine
else:
engine_name = default_engine
if opts.shell:
shell_path = opts.shell
else:
shell_path = None
shell = jsshells.shells[engine_name](shell_path)
if len(args) == 0:
run_suite(shell, opts, args)
else:
run_individual_tests(shell, opts, args)
if __name__ == "__main__":
main()
| {
"content_hash": "88020e30fac57ee1998d6d4d8d86e602",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 109,
"avg_line_length": 27.494949494949495,
"alnum_prop": 0.5756796473181485,
"repo_name": "Ms2ger/presto-testo",
"id": "8212e06a840597fb092cf400a6e7ac09d5f32df8",
"size": "5467",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "core/standards/scripts/mjsunit/harness.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2312"
},
{
"name": "ActionScript",
"bytes": "23470"
},
{
"name": "AutoHotkey",
"bytes": "8832"
},
{
"name": "Batchfile",
"bytes": "5001"
},
{
"name": "C",
"bytes": "116512"
},
{
"name": "C++",
"bytes": "219233"
},
{
"name": "CSS",
"bytes": "207914"
},
{
"name": "Erlang",
"bytes": "18523"
},
{
"name": "Groff",
"bytes": "674"
},
{
"name": "HTML",
"bytes": "103272540"
},
{
"name": "Haxe",
"bytes": "3874"
},
{
"name": "Java",
"bytes": "125658"
},
{
"name": "JavaScript",
"bytes": "22516936"
},
{
"name": "Makefile",
"bytes": "13409"
},
{
"name": "PHP",
"bytes": "524911"
},
{
"name": "Perl",
"bytes": "321672"
},
{
"name": "Python",
"bytes": "948191"
},
{
"name": "Ruby",
"bytes": "1006850"
},
{
"name": "Shell",
"bytes": "12140"
},
{
"name": "Smarty",
"bytes": "1860"
},
{
"name": "XSLT",
"bytes": "2567445"
}
],
"symlink_target": ""
} |
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
import numpy
from traits.api import Float
from traitsui.api import View, Item
from distribution import Distribution
class Constant(Distribution):
""" A constant distribution where all values are the same """
value = Float
traits_view = View(Item('value'))
def _get_value(self, n):
return numpy.repeat(self.value, n)
| {
"content_hash": "f9b8afb98209792071097f959dba0d26",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.7268408551068883,
"repo_name": "enthought/etsproxy",
"id": "d224d650169bb568a9b1838f6ef768fa5064b312",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/util/distribution/constant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
class Basis(BaseBuiltin):
__slots__ = ()
GD_TYPE = lib.GODOT_VARIANT_TYPE_BASIS
@staticmethod
def _copy_gdobj(gdobj):
return godot_basis_alloc(gdobj[0])
@classmethod
def build_from_rows(cls, row0, row1, row2):
cls._check_param_type('row0', row0, Vector3)
cls._check_param_type('row1', row1, Vector3)
cls._check_param_type('row2', row2, Vector3)
gd_ptr = godot_basis_alloc()
lib.godot_basis_new_with_rows(gd_ptr, row0._gd_ptr, row1._gd_ptr, row2._gd_ptr)
return cls.build_from_gdobj(gd_ptr, steal=True)
@classmethod
def build_from_euler(cls, euler):
gd_ptr = godot_basis_alloc()
if isinstance(euler, Vector3):
lib.godot_basis_new_with_euler(gd_ptr, euler._gd_ptr)
elif isinstance(euler, Quat):
lib.godot_basis_new_with_euler_quat(gd_ptr, euler._gd_ptr)
else:
            raise TypeError("Param `euler` should be of type `%s` or `%s`" % (Vector3, Quat))
return cls.build_from_gdobj(gd_ptr, steal=True)
@classmethod
def build_from_axis_and_angle(cls, axis, phi):
cls._check_param_type('axis', axis, Vector3)
cls._check_param_float('phi', phi)
gd_ptr = godot_basis_alloc()
lib.godot_basis_new_with_axis_and_angle(gd_ptr, axis._gd_ptr, phi)
return cls.build_from_gdobj(gd_ptr, steal=True)
AXIS_X = 0
AXIS_Y = 1
AXIS_Z = 2
def __init__(self): # TODO: allow rows as param ?
self._gd_ptr = godot_basis_alloc()
def __repr__(self):
return "<{n}(({v.x.x}, {v.x.y}, {v.x.z}), ({v.y.x}, {v.y.y}, {v.y.z}), ({v.z.x}, {v.z.y}, {v.z.z}))>".format(n=type(self).__name__, v=self)
def __eq__(self, other):
return isinstance(other, Basis) and lib.godot_basis_operator_equal(self._gd_ptr, other._gd_ptr)
def __ne__(self, other):
return not self == other
def __neg__(self):
return type(self)(-self.x, -self.y, -self.z)
def __pos__(self):
return self
def __add__(self, val):
if isinstance(val, Basis):
gd_obj = lib.godot_basis_operator_add(self._gd_ptr, val._gd_ptr)
return Basis.build_from_gdobj(gd_obj)
else:
return NotImplemented
def __sub__(self, val):
if isinstance(val, Basis):
gd_obj = lib.godot_basis_operator_substract(self._gd_ptr, val._gd_ptr)
return Basis.build_from_gdobj(gd_obj)
else:
return NotImplemented
def __mul__(self, val):
if isinstance(val, Basis):
gd_obj = lib.godot_basis_operator_multiply_basis(self._gd_ptr, val._gd_ptr)
else:
gd_obj = lib.godot_basis_operator_multiply_scalar(self._gd_ptr, val)
return Basis.build_from_gdobj(gd_obj)
def __truediv__(self, val):
if isinstance(val, Basis):
gd_obj = lib.godot_basis_operator_divide_basis(self._gd_ptr, val._gd_ptr)
else:
gd_obj = lib.godot_basis_operator_divide_scalar(self._gd_ptr, val)
return Basis.build_from_gdobj(gd_obj)
# Properties
@property
def x(self):
return Vector3.build_from_gdobj(lib.godot_basis_get_axis(self._gd_ptr, self.AXIS_X))
@property
def y(self):
return Vector3.build_from_gdobj(lib.godot_basis_get_axis(self._gd_ptr, self.AXIS_Y))
@property
def z(self):
return Vector3.build_from_gdobj(lib.godot_basis_get_axis(self._gd_ptr, self.AXIS_Z))
@x.setter
def x(self, val):
self._check_param_type('val', val, Vector3)
lib.godot_basis_set_axis(self._gd_ptr, self.AXIS_X, val._gd_ptr)
@y.setter
def y(self, val):
self._check_param_type('val', val, Vector3)
lib.godot_basis_set_axis(self._gd_ptr, self.AXIS_Y, val._gd_ptr)
@z.setter
def z(self, val):
self._check_param_type('val', val, Vector3)
lib.godot_basis_set_axis(self._gd_ptr, self.AXIS_Z, val._gd_ptr)
# Methods
def determinant(self):
return lib.godot_basis_determinant(self._gd_ptr)
def get_euler(self):
gd_obj = lib.godot_basis_get_euler(self._gd_ptr)
return Vector3.build_from_gdobj(gd_obj)
def get_orthogonal_index(self):
return lib.godot_basis_get_orthogonal_index(self._gd_ptr)
def get_scale(self):
gd_obj = lib.godot_basis_get_scale(self._gd_ptr)
return Vector3.build_from_gdobj(gd_obj)
def inverse(self):
gd_obj = lib.godot_basis_inverse(self._gd_ptr)
return Basis.build_from_gdobj(gd_obj)
def orthonormalized(self):
gd_obj = lib.godot_basis_orthonormalized(self._gd_ptr)
return Basis.build_from_gdobj(gd_obj)
def rotated(self, axis, phi):
self._check_param_type('axis', axis, Vector3)
gd_obj = lib.godot_basis_rotated(self._gd_ptr, axis._gd_ptr, phi)
return Basis.build_from_gdobj(gd_obj)
def scaled(self, scale):
self._check_param_type('scale', scale, Vector3)
gd_obj = lib.godot_basis_scaled(self._gd_ptr, scale._gd_ptr)
return Basis.build_from_gdobj(gd_obj)
def tdotx(self, with_):
self._check_param_type('with_', with_, Vector3)
return lib.godot_basis_tdotx(self._gd_ptr, with_._gd_ptr)
def tdoty(self, with_):
self._check_param_type('with_', with_, Vector3)
return lib.godot_basis_tdoty(self._gd_ptr, with_._gd_ptr)
def tdotz(self, with_):
self._check_param_type('with_', with_, Vector3)
return lib.godot_basis_tdotz(self._gd_ptr, with_._gd_ptr)
def transposed(self):
gd_obj = lib.godot_basis_transposed(self._gd_ptr)
return Basis.build_from_gdobj(gd_obj)
def xform(self, vect):
self._check_param_type('vect', vect, Vector3)
gd_obj = lib.godot_basis_xform(self._gd_ptr, vect._gd_ptr)
return Vector3.build_from_gdobj(gd_obj)
def xform_inv(self, vect):
self._check_param_type('vect', vect, Vector3)
gd_obj = lib.godot_basis_xform_inv(self._gd_ptr, vect._gd_ptr)
return Vector3.build_from_gdobj(gd_obj)
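    # Illustrative usage sketch (not part of the original binding); it assumes
    # a running Godot/pythonscript environment where Basis and Vector3 are
    # registered builtins:
    #
    #   b = Basis.build_from_axis_and_angle(Vector3(0, 1, 0), 3.14159 / 2)
    #   v = b.xform(Vector3(1, 0, 0))    # rotate the X axis around Y
    #   b.xform_inv(v)                   # approximately Vector3(1, 0, 0) again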
| {
"content_hash": "298d3c575ad8c0c00040eea7a7d84f30",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 147,
"avg_line_length": 35.01149425287356,
"alnum_prop": 0.6012803676953381,
"repo_name": "razvanc-r/godot-python",
"id": "dd245502f406354f44444835767c1ad2a3b9ad0c",
"size": "6092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonscript/cffi_bindings/builtin_basis.inc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2543"
},
{
"name": "C++",
"bytes": "61189"
},
{
"name": "GDScript",
"bytes": "1454"
},
{
"name": "Makefile",
"bytes": "4174"
},
{
"name": "Python",
"bytes": "1015802"
}
],
"symlink_target": ""
} |
import json
from datetime import datetime
from typing import Dict, List
from croniter import croniter
import frappe
from frappe.model.document import Document
from frappe.utils import get_datetime, now_datetime
from frappe.utils.background_jobs import enqueue, get_jobs
class ScheduledJobType(Document):
def autoname(self):
self.name = ".".join(self.method.split(".")[-2:])
def validate(self):
if self.frequency != "All":
# force logging for all events other than continuous ones (ALL)
self.create_log = 1
def enqueue(self, force=False):
# enqueue event if last execution is done
if self.is_event_due() or force:
if frappe.flags.enqueued_jobs:
frappe.flags.enqueued_jobs.append(self.method)
if frappe.flags.execute_job:
self.execute()
else:
if not self.is_job_in_queue():
enqueue('frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job',
queue = self.get_queue_name(), job_type=self.method)
return True
return False
def is_event_due(self, current_time = None):
		'''Return True if the event is due, based on the time elapsed since the last execution'''
# if the next scheduled event is before NOW, then its due!
return self.get_next_execution() <= (current_time or now_datetime())
def is_job_in_queue(self):
queued_jobs = get_jobs(site=frappe.local.site, key='job_type')[frappe.local.site]
return self.method in queued_jobs
def get_next_execution(self):
CRON_MAP = {
"Yearly": "0 0 1 1 *",
"Annual": "0 0 1 1 *",
"Monthly": "0 0 1 * *",
"Monthly Long": "0 0 1 * *",
"Weekly": "0 0 * * 0",
"Weekly Long": "0 0 * * 0",
"Daily": "0 0 * * *",
"Daily Long": "0 0 * * *",
"Hourly": "0 * * * *",
"Hourly Long": "0 * * * *",
"All": "0/" + str((frappe.get_conf().scheduler_interval or 240) // 60) + " * * * *",
}
if not self.cron_format:
self.cron_format = CRON_MAP[self.frequency]
return croniter(self.cron_format,
get_datetime(self.last_execution or datetime(2000, 1, 1))).get_next(datetime)
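	# Illustrative sketch (comment added for clarity, not in the original
	# file): the croniter call above resolves the next run strictly after
	# last_execution, e.g. a "Daily" job ("0 0 * * *") last executed at
	# 2021-01-01 10:00 is next due at midnight on 2021-01-02.
	#
	#   >>> from croniter import croniter
	#   >>> from datetime import datetime
	#   >>> croniter("0 0 * * *", datetime(2021, 1, 1, 10)).get_next(datetime)
	#   datetime.datetime(2021, 1, 2, 0, 0)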
def execute(self):
self.scheduler_log = None
try:
self.log_status('Start')
if self.server_script:
script_name = frappe.db.get_value("Server Script", self.server_script)
if script_name:
frappe.get_doc('Server Script', script_name).execute_scheduled_method()
else:
frappe.get_attr(self.method)()
frappe.db.commit()
self.log_status('Complete')
except Exception:
frappe.db.rollback()
self.log_status('Failed')
def log_status(self, status):
# log file
frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}")
self.update_scheduler_log(status)
def update_scheduler_log(self, status):
if not self.create_log:
# self.get_next_execution will work properly iff self.last_execution is properly set
if self.frequency == "All" and status == 'Start':
self.db_set('last_execution', now_datetime(), update_modified=False)
frappe.db.commit()
return
if not self.scheduler_log:
self.scheduler_log = frappe.get_doc(dict(doctype = 'Scheduled Job Log', scheduled_job_type=self.name)).insert(ignore_permissions=True)
self.scheduler_log.db_set('status', status)
if status == 'Failed':
self.scheduler_log.db_set('details', frappe.get_traceback())
if status == 'Start':
self.db_set('last_execution', now_datetime(), update_modified=False)
frappe.db.commit()
def get_queue_name(self):
return 'long' if ('Long' in self.frequency) else 'default'
def on_trash(self):
frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name})
@frappe.whitelist()
def execute_event(doc: str):
frappe.only_for("System Manager")
doc = json.loads(doc)
frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True)
return doc
def run_scheduled_job(job_type: str):
"""This is a wrapper function that runs a hooks.scheduler_events method"""
try:
frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute()
except Exception:
print(frappe.get_traceback())
def sync_jobs(hooks: Dict = None):
frappe.reload_doc("core", "doctype", "scheduled_job_type")
scheduler_events = hooks or frappe.get_hooks("scheduler_events")
all_events = insert_events(scheduler_events)
clear_events(all_events)
def insert_events(scheduler_events: Dict) -> List:
cron_jobs, event_jobs = [], []
for event_type in scheduler_events:
events = scheduler_events.get(event_type)
if isinstance(events, dict):
cron_jobs += insert_cron_jobs(events)
else:
# hourly, daily etc
event_jobs += insert_event_jobs(events, event_type)
return cron_jobs + event_jobs
def insert_cron_jobs(events: Dict) -> List:
cron_jobs = []
for cron_format in events:
for event in events.get(cron_format):
cron_jobs.append(event)
insert_single_event("Cron", event, cron_format)
return cron_jobs
def insert_event_jobs(events: List, event_type: str) -> List:
event_jobs = []
for event in events:
event_jobs.append(event)
frequency = event_type.replace("_", " ").title()
insert_single_event(frequency, event)
return event_jobs
def insert_single_event(frequency: str, event: str, cron_format: str = None):
cron_expr = {"cron_format": cron_format} if cron_format else {}
doc = frappe.get_doc(
{
"doctype": "Scheduled Job Type",
"method": event,
"cron_format": cron_format,
"frequency": frequency,
}
)
if not frappe.db.exists(
"Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr}
):
try:
doc.insert()
except frappe.DuplicateEntryError:
doc.delete()
doc.insert()
def clear_events(all_events: List):
for event in frappe.get_all(
"Scheduled Job Type", fields=["name", "method", "server_script"]
):
is_server_script = event.server_script
is_defined_in_hooks = event.method in all_events
if not (is_defined_in_hooks or is_server_script):
frappe.delete_doc("Scheduled Job Type", event.name)
| {
"content_hash": "c0091e67d064e126fa65f83d919ab55d",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 137,
"avg_line_length": 30.427835051546392,
"alnum_prop": 0.6845671692359817,
"repo_name": "mhbu50/frappe",
"id": "1a795bab82588807cf95ffcc4900599dcfe7bd37",
"size": "5991",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
from appannie.util import format_request_data
class FeaturesMetadata(object):
CATEGORIES_ENDPOINT = '/meta/feature/categories'
TYPES_ENDPOINT = '/meta/feature/types'
def __init__(self, http_client, market):
self.http_client = http_client
self.market = market
def categories(self, countries=None):
data = format_request_data(market=self.market, countries=countries)
r = self.http_client.request(self.CATEGORIES_ENDPOINT, data)
return r.get('all_category_pages')
def types(self):
data = format_request_data(market=self.market)
r = self.http_client.request(self.TYPES_ENDPOINT, data)
return r.get('types')
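    # Illustrative usage sketch (not part of the original module); the
    # concrete http_client object and the 'ios' market value are assumptions:
    #
    #   meta = FeaturesMetadata(http_client, market='ios')
    #   pages = meta.categories(countries='US')
    #   feature_types = meta.types()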
| {
"content_hash": "b5372327b52073f58d63935c36b7c5c3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 34.6,
"alnum_prop": 0.6748554913294798,
"repo_name": "webhue/appannie",
"id": "5dc1468294f63c5994f4227fae8999e4f2c09b58",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appannie/metadata/features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34572"
}
],
"symlink_target": ""
} |
"""Convenience classes for configuring Vizier Study Configs and Search Spaces.
This module contains several classes, used to access/build Vizier StudyConfig
protos:
* `StudyConfig` class is the main class, which:
1) Allows to easily build Vizier StudyConfig protos via a convenient
Python API.
2) Can be initialized from an existing StudyConfig proto, to enable easy
Pythonic accessors to information contained in StudyConfig protos,
and easy field editing capabilities.
* `SearchSpace` and `SearchSpaceSelector` classes deals with Vizier search
spaces. Both flat spaces and conditional parameters are supported.
"""
import collections
import copy
import enum
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import attr
from google.cloud.aiplatform.vizier.pyvizier.automated_stopping import (
AutomatedStoppingConfig,
)
from google.cloud.aiplatform.vizier.pyvizier import proto_converters
from google.cloud.aiplatform.vizier.pyvizier import SearchSpace
from google.cloud.aiplatform.vizier.pyvizier import ProblemStatement
from google.cloud.aiplatform.vizier.pyvizier import ObjectiveMetricGoal
from google.cloud.aiplatform.vizier.pyvizier import SearchSpaceSelector
from google.cloud.aiplatform.vizier.pyvizier import MetricsConfig
from google.cloud.aiplatform.vizier.pyvizier import MetricInformation
from google.cloud.aiplatform.vizier.pyvizier import Trial
from google.cloud.aiplatform.vizier.pyvizier import ParameterValueTypes
from google.cloud.aiplatform.vizier.pyvizier import ParameterConfig
from google.cloud.aiplatform.compat.types import study as study_pb2
################### PyTypes ###################
# A sequence of possible internal parameter values.
# Possible types for trial parameter values after cast to external types.
ParameterValueSequence = Union[
ParameterValueTypes,
Sequence[int],
Sequence[float],
Sequence[str],
Sequence[bool],
]
################### Enums ###################
class Algorithm(enum.Enum):
"""Valid Values for StudyConfig.Algorithm."""
ALGORITHM_UNSPECIFIED = study_pb2.StudySpec.Algorithm.ALGORITHM_UNSPECIFIED
# GAUSSIAN_PROCESS_BANDIT = study_pb2.StudySpec.Algorithm.GAUSSIAN_PROCESS_BANDIT
GRID_SEARCH = study_pb2.StudySpec.Algorithm.GRID_SEARCH
RANDOM_SEARCH = study_pb2.StudySpec.Algorithm.RANDOM_SEARCH
# NSGA2 = study_pb2.StudySpec.Algorithm.NSGA2
# EMUKIT_GP_EI = study_pb2.StudySpec.Algorithm.EMUKIT_GP_EI
class ObservationNoise(enum.Enum):
"""Valid Values for StudyConfig.ObservationNoise."""
OBSERVATION_NOISE_UNSPECIFIED = (
study_pb2.StudySpec.ObservationNoise.OBSERVATION_NOISE_UNSPECIFIED
)
LOW = study_pb2.StudySpec.ObservationNoise.LOW
HIGH = study_pb2.StudySpec.ObservationNoise.HIGH
################### Classes For Various Config Protos ###################
@attr.define(frozen=False, init=True, slots=True, kw_only=True)
class MetricInformationConverter:
"""A wrapper for vizier_pb2.MetricInformation."""
@classmethod
def from_proto(cls, proto: study_pb2.StudySpec.MetricSpec) -> MetricInformation:
"""Converts a MetricInformation proto to a MetricInformation object."""
if proto.goal not in list(ObjectiveMetricGoal):
raise ValueError("Unknown MetricInformation.goal: {}".format(proto.goal))
return MetricInformation(
name=proto.metric_id,
goal=proto.goal,
safety_threshold=None,
safety_std_threshold=None,
min_value=None,
max_value=None,
)
@classmethod
def to_proto(cls, obj: MetricInformation) -> study_pb2.StudySpec.MetricSpec:
"""Returns this object as a proto."""
return study_pb2.StudySpec.MetricSpec(metric_id=obj.name, goal=obj.goal.value)
class MetricsConfig(MetricsConfig):
"""Metrics config."""
@classmethod
def from_proto(
cls, protos: Iterable[study_pb2.StudySpec.MetricSpec]
) -> "MetricsConfig":
return cls(MetricInformationConverter.from_proto(m) for m in protos)
def to_proto(self) -> List[study_pb2.StudySpec.MetricSpec]:
return [MetricInformationConverter.to_proto(metric) for metric in self]
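# --- Illustrative sketch (added for clarity; not part of the original module):
# --- a minimal round trip of one MetricSpec proto through the converters above.
# --- The metric name "accuracy" is an arbitrary example value.
def _metrics_config_round_trip_example():
    spec = study_pb2.StudySpec.MetricSpec(
        metric_id="accuracy",
        goal=study_pb2.StudySpec.MetricSpec.GoalType.MAXIMIZE,
    )
    metrics = MetricsConfig.from_proto([spec])
    assert metrics.to_proto()[0].metric_id == "accuracy"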
SearchSpaceSelector = SearchSpaceSelector
@attr.define(frozen=True, init=True, slots=True, kw_only=True)
class SearchSpace(SearchSpace):
"""A Selector for all, or part of a SearchSpace."""
@classmethod
def from_proto(cls, proto: study_pb2.StudySpec) -> "SearchSpace":
"""Extracts a SearchSpace object from a StudyConfig proto."""
parameter_configs = []
for pc in proto.parameters:
parameter_configs.append(
proto_converters.ParameterConfigConverter.from_proto(pc)
)
return cls._factory(parameter_configs=parameter_configs)
@property
def parameter_protos(self) -> List[study_pb2.StudySpec.ParameterSpec]:
"""Returns the search space as a List of ParameterConfig protos."""
return [
proto_converters.ParameterConfigConverter.to_proto(pc)
for pc in self._parameter_configs
]
################### Main Class ###################
#
# A StudyConfig object can be initialized:
# (1) From a StudyConfig proto using StudyConfig.from_proto():
# study_config_proto = study_pb2.StudySpec(...)
# study_config = pyvizier.StudyConfig.from_proto(study_config_proto)
# # Attributes can be modified.
# new_proto = study_config.to_proto()
#
# (2) By directly calling __init__ and setting attributes:
# study_config = pyvizier.StudyConfig(
# metric_information=[pyvizier.MetricInformation(
# name='accuracy', goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE)],
# search_space=SearchSpace.from_proto(proto),
# )
# # OR:
# study_config = pyvizier.StudyConfig()
# study_config.metric_information.append(
# pyvizier.MetricInformation(
# name='accuracy', goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE))
#
# # Since building a search space is more involved, get a reference to the
# # search space, and add parameters to it.
# root = study_config.search_space.select_root()
# root.add_float_param('learning_rate', 0.001, 1.0,
# scale_type=pyvizier.ScaleType.LOG)
#
@attr.define(frozen=False, init=True, slots=True, kw_only=True)
class StudyConfig(ProblemStatement):
"""A builder and wrapper for study_pb2.StudySpec proto."""
search_space: SearchSpace = attr.field(
init=True,
factory=SearchSpace,
validator=attr.validators.instance_of(SearchSpace),
on_setattr=attr.setters.validate,
)
algorithm: Algorithm = attr.field(
init=True,
validator=attr.validators.instance_of(Algorithm),
on_setattr=[attr.setters.convert, attr.setters.validate],
default=Algorithm.ALGORITHM_UNSPECIFIED,
kw_only=True,
)
metric_information: MetricsConfig = attr.field(
init=True,
factory=MetricsConfig,
converter=MetricsConfig,
validator=attr.validators.instance_of(MetricsConfig),
kw_only=True,
)
observation_noise: ObservationNoise = attr.field(
init=True,
validator=attr.validators.instance_of(ObservationNoise),
on_setattr=attr.setters.validate,
default=ObservationNoise.OBSERVATION_NOISE_UNSPECIFIED,
kw_only=True,
)
automated_stopping_config: Optional[AutomatedStoppingConfig] = attr.field(
init=True,
default=None,
validator=attr.validators.optional(
attr.validators.instance_of(AutomatedStoppingConfig)
),
on_setattr=attr.setters.validate,
kw_only=True,
)
# An internal representation as a StudyConfig proto.
# If this object was created from a StudyConfig proto, a copy of the original
# proto is kept, to make sure that unknown proto fields are preserved in
# round trip serialization.
# TODO: Fix the broken proto validation.
_study_config: study_pb2.StudySpec = attr.field(
init=True, factory=study_pb2.StudySpec, kw_only=True
)
# Public attributes, methods and properties.
@classmethod
def from_proto(cls, proto: study_pb2.StudySpec) -> "StudyConfig":
"""Converts a StudyConfig proto to a StudyConfig object.
Args:
proto: StudyConfig proto.
Returns:
A StudyConfig object.
"""
metric_information = MetricsConfig(
sorted(
[MetricInformationConverter.from_proto(m) for m in proto.metrics],
key=lambda x: x.name,
)
)
oneof_name = proto._pb.WhichOneof("automated_stopping_spec")
if not oneof_name:
automated_stopping_config = None
else:
automated_stopping_config = AutomatedStoppingConfig.from_proto(
getattr(proto, oneof_name)
)
return cls(
search_space=SearchSpace.from_proto(proto),
algorithm=Algorithm(proto.algorithm),
metric_information=metric_information,
observation_noise=ObservationNoise(proto.observation_noise),
automated_stopping_config=automated_stopping_config,
study_config=copy.deepcopy(proto),
)
def to_proto(self) -> study_pb2.StudySpec:
"""Serializes this object to a StudyConfig proto."""
proto = copy.deepcopy(self._study_config)
proto.algorithm = self.algorithm.value
proto.observation_noise = self.observation_noise.value
del proto.metrics[:]
proto.metrics.extend(self.metric_information.to_proto())
del proto.parameters[:]
proto.parameters.extend(self.search_space.parameter_protos)
if self.automated_stopping_config is not None:
auto_stop_proto = self.automated_stopping_config.to_proto()
if isinstance(
auto_stop_proto, study_pb2.StudySpec.DecayCurveAutomatedStoppingSpec
):
proto.decay_curve_stopping_spec = copy.deepcopy(auto_stop_proto)
            elif isinstance(
                auto_stop_proto, study_pb2.StudySpec.MedianAutomatedStoppingSpec
            ):
                proto.median_automated_stopping_spec = copy.deepcopy(auto_stop_proto)
return proto
@property
def is_single_objective(self) -> bool:
"""Returns True if only one objective metric is configured."""
return len(self.metric_information) == 1
@property
def single_objective_metric_name(self) -> Optional[str]:
"""Returns the name of the single-objective metric, if set.
Returns:
String: name of the single-objective metric.
None: if this is not a single-objective study.
"""
if len(self.metric_information) == 1:
return list(self.metric_information)[0].name
return None
def _trial_to_external_values(
self, pytrial: Trial
) -> Dict[str, Union[float, int, str, bool]]:
"""Returns the trial paremeter values cast to external types."""
parameter_values: Dict[str, Union[float, int, str]] = {}
external_values: Dict[str, Union[float, int, str, bool]] = {}
# parameter_configs is a list of Tuple[parent_name, ParameterConfig].
parameter_configs: List[Tuple[Optional[str], ParameterConfig]] = [
(None, p) for p in self.search_space.parameters
]
remaining_parameters = copy.deepcopy(pytrial.parameters)
# Traverse the conditional tree using a BFS.
while parameter_configs and remaining_parameters:
parent_name, pc = parameter_configs.pop(0)
parameter_configs.extend(
(pc.name, child) for child in pc.child_parameter_configs
)
if pc.name not in remaining_parameters:
continue
if parent_name is not None:
# This is a child parameter. If the parent was not seen,
# skip this parameter config.
if parent_name not in parameter_values:
continue
parent_value = parameter_values[parent_name]
if parent_value not in pc.matching_parent_values:
continue
parameter_values[pc.name] = remaining_parameters[pc.name].value
if pc.external_type is None:
external_value = remaining_parameters[pc.name].value
else:
external_value = remaining_parameters[pc.name].cast(
pc.external_type
) # pytype: disable=wrong-arg-types
external_values[pc.name] = external_value
remaining_parameters.pop(pc.name)
return external_values
def trial_parameters(
self, proto: study_pb2.Trial
) -> Dict[str, ParameterValueSequence]:
"""Returns the trial values, cast to external types, if they exist.
Args:
proto:
Returns:
Parameter values dict: cast to each parameter's external_type, if exists.
NOTE that the values in the dict may be a Sequence as opposed to a single
element.
Raises:
ValueError: If the trial parameters do not exist in this search space.
ValueError: If the trial contains duplicate parameters.
"""
pytrial = proto_converters.TrialConverter.from_proto(proto)
return self._pytrial_parameters(pytrial)
def _pytrial_parameters(self, pytrial: Trial) -> Dict[str, ParameterValueSequence]:
"""Returns the trial values, cast to external types, if they exist.
Args:
pytrial:
Returns:
Parameter values dict: cast to each parameter's external_type, if exists.
NOTE that the values in the dict may be a Sequence as opposed to a single
element.
Raises:
ValueError: If the trial parameters do not exist in this search space.
ValueError: If the trial contains duplicate parameters.
"""
trial_external_values: Dict[
str, Union[float, int, str, bool]
] = self._trial_to_external_values(pytrial)
if len(trial_external_values) != len(pytrial.parameters):
raise ValueError(
"Invalid trial for this search space: failed to convert "
"all trial parameters: {}".format(pytrial)
)
# Combine multi-dimensional parameter values to a list of values.
trial_final_values: Dict[str, ParameterValueSequence] = {}
# multi_dim_params: Dict[str, List[Tuple[int, ParameterValueSequence]]]
multi_dim_params = collections.defaultdict(list)
for name in trial_external_values:
base_index = SearchSpaceSelector.parse_multi_dimensional_parameter_name(
name
)
if base_index is None:
trial_final_values[name] = trial_external_values[name]
else:
base_name, index = base_index
multi_dim_params[base_name].append((index, trial_external_values[name]))
for name in multi_dim_params:
multi_dim_params[name].sort(key=lambda x: x[0])
trial_final_values[name] = [x[1] for x in multi_dim_params[name]]
return trial_final_values
def trial_metrics(
self, proto: study_pb2.Trial, *, include_all_metrics=False
) -> Dict[str, float]:
"""Returns the trial's final measurement metric values.
If the trial is not completed, or infeasible, no metrics are returned.
By default, only metrics configured in the StudyConfig are returned
(e.g. only objective and safety metrics).
Args:
proto:
include_all_metrics: If True, all metrics in the final measurements are
returned. If False, only metrics configured in the StudyConfig are
returned.
Returns:
Dict[metric name, metric value]
"""
pytrial = proto_converters.TrialConverter.from_proto(proto)
return self._pytrial_metrics(pytrial, include_all_metrics=include_all_metrics)
def _pytrial_metrics(
self, pytrial: Trial, *, include_all_metrics=False
) -> Dict[str, float]:
"""Returns the trial's final measurement metric values.
If the trial is not completed, or infeasible, no metrics are returned.
By default, only metrics configured in the StudyConfig are returned
(e.g. only objective and safety metrics).
Args:
pytrial:
include_all_metrics: If True, all metrics in the final measurements are
returned. If False, only metrics configured in the StudyConfig are
returned.
Returns:
Dict[metric name, metric value]
"""
configured_metrics = [m.name for m in self.metric_information]
metrics: Dict[str, float] = {}
if pytrial.is_completed and not pytrial.infeasible:
for name in pytrial.final_measurement.metrics:
if include_all_metrics or (
not include_all_metrics and name in configured_metrics
):
# Special case: Measurement always adds an empty metric by default.
# If there is a named single objective in study_config, drop the empty
# metric.
if not name and self.single_objective_metric_name != name:
continue
metrics[name] = pytrial.final_measurement.metrics[name].value
return metrics
| {
"content_hash": "1a297e107134ef142da95276bb471ced",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 90,
"avg_line_length": 39.65562913907285,
"alnum_prop": 0.647851258071699,
"repo_name": "googleapis/python-aiplatform",
"id": "0314e1442f587854c1b12853ece67b8c59f54531",
"size": "17964",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/aiplatform/vizier/pyvizier/study_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
"""
Class definitions to represent a molecular system and its chemical components
.. todo::
* Create MoleculeImage, ParticleImage, AtomImage, VirtualSiteImage here. (Or ``MoleculeInstance``?)
* Create ``MoleculeGraph`` to represent fozen set of atom elements and bonds that can used as a key for compression
* Add hierarchical way of traversing Topology (chains, residues)
* Make all classes hashable and serializable.
* JSON/BSON representations of objects?
* Use `attrs <http://www.attrs.org/>`_ for object setter boilerplate?
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import itertools
from collections import OrderedDict
from collections.abc import MutableMapping
import numpy as np
from simtk import unit
from simtk.openmm import app
from openff.toolkit.typing.chemistry import ChemicalEnvironment
from openff.toolkit.utils import MessageException
from openff.toolkit.utils.serialization import Serializable
from openff.toolkit.utils.toolkits import (
ALLOWED_AROMATICITY_MODELS,
ALLOWED_CHARGE_MODELS,
ALLOWED_FRACTIONAL_BOND_ORDER_MODELS,
DEFAULT_AROMATICITY_MODEL,
GLOBAL_TOOLKIT_REGISTRY,
)
# =============================================================================================
# Exceptions
# =============================================================================================
class DuplicateUniqueMoleculeError(MessageException):
"""
Exception for when the user provides indistinguishable unique molecules when trying to identify atoms from a PDB
"""
pass
class NotBondedError(MessageException):
"""
Exception for when a function requires a bond between two atoms, but none is present
"""
pass
class InvalidBoxVectorsError(MessageException):
"""
Exception for setting invalid box vectors
"""
class InvalidPeriodicityError(MessageException):
"""
Exception for setting invalid periodicity
"""
class MissingUniqueMoleculesError(MessageException):
"""
Exception for a when unique_molecules is required but not found
"""
# =============================================================================================
# PRIVATE SUBROUTINES
# =============================================================================================
class _TransformedDict(MutableMapping):
"""A dictionary that transform and sort keys.
The function __keytransform__ can be inherited to apply an arbitrary
key-altering function before accessing the keys.
The function __sortfunc__ can be inherited to specify a particular
order over which to iterate over the dictionary.
"""
def __init__(self, *args, **kwargs):
self.store = OrderedDict()
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(sorted(self.store, key=self.__sortfunc__))
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key
@staticmethod
def __sortfunc__(key):
return key
# TODO: Encapsulate this atom ordering logic directly into Atom/Bond/Angle/Torsion classes?
class ValenceDict(_TransformedDict):
"""Enforce uniqueness in atom indices."""
@staticmethod
def key_transform(key):
"""Reverse tuple if first element is larger than last element."""
# Ensure key is a tuple.
key = tuple(key)
assert len(key) > 0 and len(key) < 5, "Valence keys must be at most 4 atoms"
# Reverse the key if the first element is bigger than the last.
if key[0] > key[-1]:
key = tuple(reversed(key))
return key
@staticmethod
def index_of(key, possible=None):
"""
Generates a canonical ordering of the equivalent permutations of ``key`` (equivalent rearrangements of indices)
and identifies which of those possible orderings this particular ordering is. This method is useful when
multiple SMARTS patterns might match the same atoms, but local molecular symmetry or the use of
wildcards in the SMARTS could make the matches occur in arbitrary order.
This method can be restricted to a subset of the canonical orderings, by providing
the optional ``possible`` keyword argument. If provided, the index returned by this method will be
the index of the element in ``possible`` after undergoing the same canonical sorting as above.
Parameters
----------
key : iterable of int
A valid key for ValenceDict
possible : iterable of iterable of int, optional. default=``None``
A subset of the possible orderings that this match might take.
Returns
-------
index : int
"""
        assert len(key) < 5
refkey = __class__.key_transform(key)
if len(key) == 2:
permutations = OrderedDict(
{(refkey[0], refkey[1]): 0, (refkey[1], refkey[0]): 1}
)
elif len(key) == 3:
permutations = OrderedDict(
{
(refkey[0], refkey[1], refkey[2]): 0,
(refkey[2], refkey[1], refkey[0]): 1,
}
)
else:
# For a proper, only forward/backward makes sense
permutations = OrderedDict(
{
(refkey[0], refkey[1], refkey[2], refkey[3]): 0,
(refkey[3], refkey[1], refkey[2], refkey[0]): 1,
}
)
if possible is not None:
i = 0
# If the possible permutations were provided, ensure that `possible` is a SUBSET of `permutations`
assert all([p in permutations for p in possible]), (
"Possible permutations " + str(possible) + " is impossible!"
)
# TODO: Double-check whether this will generalize. It seems like this would fail if ``key``
# were in ``permutations``, but not ``possible``
for k in permutations:
if all([x == y for x, y in zip(key, k)]):
return i
if k in possible:
i += 1
else:
# If the possible permutations were NOT provided, then return the unique index of this permutation.
return permutations[key]
def __keytransform__(self, key):
return __class__.key_transform(key)
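# --- Illustrative sketch (added for clarity; not part of the original module):
# --- ValenceDict canonicalizes atom-index keys, and index_of reports which of
# --- the equivalent orderings was supplied.
def _valence_dict_example():
    vd = ValenceDict()
    vd[(3, 2, 1)] = "angle parameter"
    # The reversed key refers to the same canonical entry.
    assert vd[(1, 2, 3)] == "angle parameter"
    # For a bond key, the forward and reversed orderings map to 0 and 1.
    assert ValenceDict.index_of((1, 2)) == 0
    assert ValenceDict.index_of((2, 1)) == 1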
class SortedDict(_TransformedDict):
"""Enforce uniqueness of atom index tuples, without any restrictions on atom reordering."""
def __keytransform__(self, key):
"""Sort tuple from lowest to highest."""
# Ensure key is a tuple.
key = tuple(sorted(key))
# Reverse the key if the first element is bigger than the last.
return key
class ImproperDict(_TransformedDict):
"""Symmetrize improper torsions."""
@staticmethod
def key_transform(key):
"""Reorder tuple in numerical order except for element[1] which is the central atom; it retains its position."""
# Ensure key is a tuple
key = tuple(key)
assert len(key) == 4, "Improper keys must be 4 atoms"
# Retrieve connected atoms
connectedatoms = [key[0], key[2], key[3]]
# Sort connected atoms
connectedatoms.sort()
# Re-store connected atoms
key = tuple([connectedatoms[0], key[1], connectedatoms[1], connectedatoms[2]])
return key
@staticmethod
def index_of(key, possible=None):
"""
Generates a canonical ordering of the equivalent permutations of ``key`` (equivalent rearrangements of indices)
and identifies which of those possible orderings this particular ordering is. This method is useful when
multiple SMARTS patterns might match the same atoms, but local molecular symmetry or the use of
wildcards in the SMARTS could make the matches occur in arbitrary order.
This method can be restricted to a subset of the canonical orderings, by providing
the optional ``possible`` keyword argument. If provided, the index returned by this method will be
the index of the element in ``possible`` after undergoing the same canonical sorting as above.
Parameters
----------
key : iterable of int
            A valid key for ImproperDict
possible : iterable of iterable of int, optional. default=``None``
A subset of the possible orderings that this match might take.
Returns
-------
index : int
"""
assert len(key) == 4
refkey = __class__.key_transform(key)
permutations = OrderedDict(
{
(refkey[0], refkey[1], refkey[2], refkey[3]): 0,
(refkey[0], refkey[1], refkey[3], refkey[2]): 1,
(refkey[2], refkey[1], refkey[0], refkey[3]): 2,
(refkey[2], refkey[1], refkey[3], refkey[0]): 3,
(refkey[3], refkey[1], refkey[0], refkey[2]): 4,
(refkey[3], refkey[1], refkey[2], refkey[0]): 5,
}
)
if possible is not None:
assert all(
[p in permutations for p in possible]
), "Possible permuation is impossible!"
i = 0
for k in permutations:
if all([x == y for x, y in zip(key, k)]):
return i
if k in possible:
i += 1
else:
return permutations[key]
def __keytransform__(self, key):
return __class__.key_transform(key)
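# --- Illustrative sketch (added for clarity; not part of the original module):
# --- the central atom (position 1) stays in place while the outer three atoms
# --- are sorted, and index_of identifies which of the six orderings was given.
def _improper_dict_example():
    assert ImproperDict.key_transform((7, 2, 5, 3)) == (3, 2, 5, 7)
    assert ImproperDict.index_of((7, 2, 5, 3)) == 5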
# =============================================================================================
# TOPOLOGY OBJECTS
# =============================================================================================
# =============================================================================================
# TopologyAtom
# =============================================================================================
class TopologyAtom(Serializable):
"""
A TopologyAtom is a lightweight data structure that represents a single openff.toolkit.topology.molecule.Atom in
a Topology. A TopologyAtom consists of two references -- One to its fully detailed "atom", an
openff.toolkit.topology.molecule.Atom, and another to its parent "topology_molecule", which occupies a spot in
the parent Topology's TopologyMolecule list.
As some systems can be very large, there is no always-existing representation of a TopologyAtom. They are created on
demand as the user requests them.
.. warning :: This API is experimental and subject to change.
"""
def __init__(self, atom, topology_molecule):
"""
Create a new TopologyAtom.
Parameters
----------
atom : An openff.toolkit.topology.molecule.Atom
The reference atom
topology_molecule : An openff.toolkit.topology.TopologyMolecule
The TopologyMolecule that this TopologyAtom belongs to
"""
# TODO: Type checks
self._atom = atom
self._topology_molecule = topology_molecule
@property
def atom(self):
"""
Get the reference Atom for this TopologyAtom.
Returns
-------
an openff.toolkit.topology.molecule.Atom
"""
return self._atom
@property
def atomic_number(self):
"""
Get the atomic number of this atom
Returns
-------
int
"""
return self._atom.atomic_number
@property
def element(self):
"""
Get the element name of this atom.
Returns
-------
simtk.openmm.app.element.Element
"""
return self._atom.element
@property
def topology_molecule(self):
"""
Get the TopologyMolecule that this TopologyAtom belongs to.
Returns
-------
openff.toolkit.topology.TopologyMolecule
"""
return self._topology_molecule
@property
def molecule(self):
"""
Get the reference Molecule that this TopologyAtom belongs to.
Returns
-------
openff.toolkit.topology.molecule.Molecule
"""
return self._topology_molecule.molecule
@property
def topology_atom_index(self):
"""
Get the index of this atom in its parent Topology.
Returns
-------
int
The index of this atom in its parent topology.
"""
mapped_molecule_atom_index = self._topology_molecule._ref_to_top_index[
self._atom.molecule_atom_index
]
return (
self._topology_molecule.atom_start_topology_index
+ mapped_molecule_atom_index
)
@property
def topology_particle_index(self):
"""
Get the index of this particle in its parent Topology.
Returns
-------
int
The index of this atom in its parent topology.
"""
# This assumes that the particles in a topology are listed with all atoms from all TopologyMolecules
# first, followed by all VirtualSites from all TopologyMolecules second
return self.topology_atom_index
@property
def topology_bonds(self):
"""
Get the TopologyBonds connected to this TopologyAtom.
Returns
-------
iterator of openff.toolkit.topology.TopologyBonds
"""
for bond in self._atom.bonds:
reference_mol_bond_index = bond.molecule_bond_index
yield self._topology_molecule.bond(reference_mol_bond_index)
def __eq__(self, other):
return (self._atom == other._atom) and (
self._topology_molecule == other._topology_molecule
)
def __repr__(self):
return "TopologyAtom {} with reference atom {} and parent TopologyMolecule {}".format(
self.topology_atom_index, self._atom, self._topology_molecule
)
def to_dict(self):
"""Convert to dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
@classmethod
def from_dict(cls, d):
"""Static constructor from dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
# @property
# def bonds(self):
# """
# Get the Bonds connected to this TopologyAtom.
#
# Returns
# -------
# iterator of openff.toolkit.topology.molecule.Bonds
# """
# for bond in self._atom.bonds:
# yield bond
# TODO: Add all atom properties here? Or just make people use TopologyAtom.atom for that?
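# --- Illustrative usage sketch (added for clarity; not part of the original
# --- module). TopologyAtoms are created on demand while iterating a Topology;
# --- the water SMILES below is just an arbitrary example input.
#
#   from openff.toolkit.topology import Molecule, Topology
#   top = Topology.from_molecules([Molecule.from_smiles("O")])
#   first_atom = next(iter(top.topology_atoms))
#   first_atom.topology_atom_index  # -> 0
#   first_atom.atom                 # the underlying reference Atom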
# =============================================================================================
# TopologyBond
# =============================================================================================
class TopologyBond(Serializable):
"""
A TopologyBond is a lightweight data structure that represents a single openff.toolkit.topology.molecule.Bond in
a Topology. A TopologyBond consists of two references -- One to its fully detailed "bond", an
openff.toolkit.topology.molecule.Bond, and another to its parent "topology_molecule", which occupies a spot in
the parent Topology's TopologyMolecule list.
As some systems can be very large, there is no always-existing representation of a TopologyBond. They are created on
demand as the user requests them.
.. warning :: This API is experimental and subject to change.
"""
def __init__(self, bond, topology_molecule):
"""
Parameters
----------
bond : An openff.toolkit.topology.molecule.Bond
The reference bond.
topology_molecule : An openff.toolkit.topology.TopologyMolecule
The TopologyMolecule that this TopologyBond belongs to.
"""
# TODO: Type checks
self._bond = bond
self._topology_molecule = topology_molecule
@property
def bond(self):
"""
Get the reference Bond for this TopologyBond.
Returns
-------
an openff.toolkit.topology.molecule.Bond
"""
return self._bond
@property
def topology_molecule(self):
"""
Get the TopologyMolecule that this TopologyBond belongs to.
Returns
-------
openff.toolkit.topology.TopologyMolecule
"""
return self._topology_molecule
@property
def topology_bond_index(self):
"""
Get the index of this bond in its parent Topology.
Returns
-------
int
The index of this bond in its parent topology.
"""
return (
self._topology_molecule.bond_start_topology_index
+ self._bond.molecule_bond_index
)
@property
def molecule(self):
"""
Get the reference Molecule that this TopologyBond belongs to.
Returns
-------
openff.toolkit.topology.molecule.Molecule
"""
return self._topology_molecule.molecule
@property
def bond_order(self):
"""
Get the order of this TopologyBond.
Returns
-------
int : bond order
"""
return self._bond.bond_order
@property
def atoms(self):
"""
Get the TopologyAtoms connected to this TopologyBond.
Returns
-------
iterator of openff.toolkit.topology.TopologyAtom
"""
for ref_atom in self._bond.atoms:
yield TopologyAtom(ref_atom, self._topology_molecule)
def to_dict(self):
"""Convert to dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
@classmethod
def from_dict(cls, d):
"""Static constructor from dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
# =============================================================================================
# TopologyVirtualSite
# =============================================================================================
class TopologyVirtualSite(Serializable):
"""
A TopologyVirtualSite is a lightweight data structure that represents a single
openff.toolkit.topology.molecule.VirtualSite in a Topology. A TopologyVirtualSite consists of two references --
One to its fully detailed "VirtualSite", an openff.toolkit.topology.molecule.VirtualSite, and another to its parent
"topology_molecule", which occupies a spot in the parent Topology's TopologyMolecule list.
As some systems can be very large, there is no always-existing representation of a TopologyVirtualSite. They are
created on demand as the user requests them.
.. warning :: This API is experimental and subject to change.
"""
def __init__(self, virtual_site, topology_molecule):
"""
Parameters
----------
virtual_site : An openff.toolkit.topology.molecule.VirtualSite
The reference virtual site
topology_molecule : An openff.toolkit.topology.TopologyMolecule
The TopologyMolecule that this TopologyVirtualSite belongs to
"""
# TODO: Type checks
self._virtual_site = virtual_site
self._topology_molecule = topology_molecule
self._topology_virtual_particle_start_index = None
def invalidate_cached_data(self):
self._topology_virtual_particle_start_index = None
def atom(self, index):
"""
Get the atom at a specific index in this TopologyVirtualSite
Parameters
----------
index : int
The index of the atom in the reference VirtualSite to retrieve
Returns
-------
TopologyAtom
"""
return TopologyAtom(self._virtual_site.atoms[index], self.topology_molecule)
@property
def atoms(self):
"""
Get the TopologyAtoms involved in this TopologyVirtualSite.
Returns
-------
iterator of openff.toolkit.topology.TopologyAtom
"""
for ref_atom in self._virtual_site.atoms:
yield TopologyAtom(ref_atom, self._topology_molecule)
@property
def virtual_site(self):
"""
Get the reference VirtualSite for this TopologyVirtualSite.
Returns
-------
an openff.toolkit.topology.molecule.VirtualSite
"""
return self._virtual_site
@property
def topology_molecule(self):
"""
Get the TopologyMolecule that this TopologyVirtualSite belongs to.
Returns
-------
openff.toolkit.topology.TopologyMolecule
"""
return self._topology_molecule
@property
def topology_virtual_site_index(self):
"""
Get the index of this virtual site in its parent Topology.
Returns
-------
int
The index of this virtual site in its parent topology.
"""
return (
self._topology_molecule.virtual_site_start_topology_index
+ self._virtual_site.molecule_virtual_site_index
)
@property
def n_particles(self):
"""
Get the number of particles represented by this VirtualSite
Returns
-------
int : The number of particles
"""
return self._virtual_site.n_particles
@property
def topology_virtual_particle_start_index(self):
"""
Get the index of the first virtual site particle in its parent Topology.
Returns
-------
int
The index of this particle in its parent topology.
"""
# This assumes that the particles in a topology are listed with all
# atoms from all TopologyMolecules first, followed by all VirtualSites
# from all TopologyMolecules second
# If the cached value is not available, generate it
if self._topology_virtual_particle_start_index is None:
virtual_particle_start_topology_index = (
self.topology_molecule.topology.n_topology_atoms
)
for (
topology_molecule
) in self._topology_molecule._topology.topology_molecules:
for tvsite in topology_molecule.virtual_sites:
if self == tvsite:
break
virtual_particle_start_topology_index += tvsite.n_particles
if self._topology_molecule == topology_molecule:
break
# else:
# virtual_particle_start_topology_index += tvsite.n_particles
# virtual_particle_start_topology_index += topology_molecule.n_particles
self._topology_virtual_particle_start_index = (
virtual_particle_start_topology_index
)
# Return cached value
# print(self._topology_virtual_particle_start_index)
return self._topology_virtual_particle_start_index
@property
def particles(self):
"""
Get an iterator to the reference particles that this TopologyVirtualSite
contains.
Returns
-------
iterator of TopologyVirtualParticles
"""
for vptl in self.virtual_site.particles:
yield TopologyVirtualParticle(
self._virtual_site, vptl, self._topology_molecule
)
@property
def molecule(self):
"""
Get the reference Molecule that this TopologyVirtualSite belongs to.
Returns
-------
openff.toolkit.topology.molecule.Molecule
"""
return self._topology_molecule.molecule
@property
def type(self):
"""
Get the type of this virtual site
Returns
-------
str : The class name of this virtual site
"""
return self._virtual_site.type
def __eq__(self, other):
return (self._virtual_site == other._virtual_site) and (
self._topology_molecule == other._topology_molecule
)
def to_dict(self):
"""Convert to dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
@classmethod
def from_dict(cls, d):
"""Static constructor from dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
# =============================================================================================
# TopologyVirtualParticle
# =============================================================================================
class TopologyVirtualParticle(TopologyVirtualSite):
def __init__(self, virtual_site, virtual_particle, topology_molecule):
self._virtual_site = virtual_site
self._virtual_particle = virtual_particle
self._topology_molecule = topology_molecule
def __eq__(self, other):
if type(other) != type(self):
return False
        # Compare as TopologyVirtualSites (reference vsite + parent molecule).
        same_vsite = super().__eq__(other)
if not same_vsite:
return False
same_ptl = self.topology_particle_index == other.topology_particle_index
return same_ptl
@property
def topology_particle_index(self):
"""
Get the index of this particle in its parent Topology.
Returns
-------
idx : int
The index of this particle in its parent topology.
"""
# This assumes that the particles in a topology are listed with all atoms from all TopologyMolecules
# first, followed by all VirtualSites from all TopologyMolecules second
orientation_key = self._virtual_particle.orientation
offset = 0
# vsite is a topology vsite, which has a regular vsite
for i, ornt in enumerate(self._virtual_site._virtual_site.orientations):
if ornt == orientation_key:
offset = i
break
return offset + self._virtual_site.topology_virtual_particle_start_index
# =============================================================================================
# TopologyMolecule
# =============================================================================================
class TopologyMolecule:
"""
TopologyMolecules are built to be an efficient way to store large numbers of copies of the same molecule for
parameterization and system preparation.
.. warning :: This API is experimental and subject to change.
"""
def __init__(
self, reference_molecule, topology, local_topology_to_reference_index=None
):
"""
Create a new TopologyMolecule.
Parameters
----------
reference_molecule : an openff.toolkit.topology.molecule.Molecule
The reference molecule, with details like formal charges, partial charges, bond orders, partial bond orders,
and atomic symbols.
topology : an openff.toolkit.topology.Topology
The topology that this TopologyMolecule belongs to
local_topology_to_reference_index : dict, optional, default=None
Dictionary of {TopologyMolecule_atom_index : Molecule_atom_index} for the TopologyMolecule that will be built
"""
# TODO: Type checks
self._reference_molecule = reference_molecule
self._topology = topology
if local_topology_to_reference_index is None:
local_topology_to_reference_index = dict(
[(i, i) for i in range(reference_molecule.n_atoms)]
)
self._top_to_ref_index = local_topology_to_reference_index
self._ref_to_top_index = dict(
(k, j) for j, k in local_topology_to_reference_index.items()
)
# Initialize cached data
self._atom_start_topology_index = None
self._particle_start_topology_index = None
self._bond_start_topology_index = None
self._virtual_site_start_topology_index = None
self._virtual_particle_start_topology_index = None
def _invalidate_cached_data(self):
"""Unset all cached data, in response to an appropriate change"""
self._atom_start_topology_index = None
self._particle_start_topology_index = None
self._bond_start_topology_index = None
self._virtual_site_start_topology_index = None
self._virtual_particle_start_topology_index = None
for vsite in self.virtual_sites:
vsite.invalidate_cached_data()
@property
def topology(self):
"""
Get the topology that this TopologyMolecule belongs to
Returns
-------
an openff.toolkit.topology.Topology
"""
return self._topology
@property
def reference_molecule(self):
"""
Get the reference molecule for this TopologyMolecule
Returns
-------
an openff.toolkit.topology.molecule.Molecule
"""
return self._reference_molecule
@property
def n_atoms(self):
"""
The number of atoms in this topology.
Returns
-------
int
"""
return self._reference_molecule.n_atoms
def atom(self, index):
"""
Get the TopologyAtom with a given topology atom index in this TopologyMolecule.
Parameters
----------
index : int
Index of the TopologyAtom within this TopologyMolecule to retrieve
Returns
-------
an openff.toolkit.topology.TopologyAtom
"""
ref_mol_atom_index = self._top_to_ref_index[index]
return TopologyAtom(self._reference_molecule.atoms[ref_mol_atom_index], self)
@property
def atoms(self):
"""
Return an iterator of all the TopologyAtoms in this TopologyMolecule
Returns
-------
an iterator of openff.toolkit.topology.TopologyAtoms
"""
iterate_order = list(self._top_to_ref_index.items())
# Sort by topology index
iterate_order.sort(key=lambda x: x[0])
for top_index, ref_index in iterate_order:
# self._reference_molecule.atoms:
yield TopologyAtom(self._reference_molecule.atoms[ref_index], self)
@property
def atom_start_topology_index(self):
"""
Get the topology index of the first atom in this TopologyMolecule
"""
# If cached value is not available, generate it.
if self._atom_start_topology_index is None:
atom_start_topology_index = 0
for topology_molecule in self._topology.topology_molecules:
if self == topology_molecule:
self._atom_start_topology_index = atom_start_topology_index
break
atom_start_topology_index += topology_molecule.n_atoms
# Return cached value
return self._atom_start_topology_index
@property
def virtual_particle_start_topology_index(self):
"""
Get the topology index of the first virtual particle in this TopologyMolecule
"""
# If cached value is not available, generate it.
if self._virtual_particle_start_topology_index is None:
            particle_start_topology_index = self.topology.n_topology_atoms
for topology_molecule in self._topology.topology_molecules:
if self == topology_molecule:
self._particle_start_topology_index = particle_start_topology_index
break
offset = sum(
[vsite.n_particles for vsite in topology_molecule.virtual_sites]
)
particle_start_topology_index += offset
self._virtual_particle_start_topology_index = particle_start_topology_index
# Return cached value
return self._virtual_particle_start_topology_index
def bond(self, index):
"""
Get the TopologyBond with a given reference molecule index in this TopologyMolecule
Parameters
----------
index : int
Index of the TopologyBond within this TopologyMolecule to retrieve
Returns
-------
an openff.toolkit.topology.TopologyBond
"""
return TopologyBond(self.reference_molecule.bonds[index], self)
@property
def bonds(self):
"""
Return an iterator of all the TopologyBonds in this TopologyMolecule
Returns
-------
an iterator of openff.toolkit.topology.TopologyBonds
"""
for bond in self._reference_molecule.bonds:
yield TopologyBond(bond, self)
@property
def n_bonds(self):
"""Get the number of bonds in this TopologyMolecule
Returns
-------
int : number of bonds
"""
return self._reference_molecule.n_bonds
@property
def bond_start_topology_index(self):
"""Get the topology index of the first bond in this TopologyMolecule"""
# If cached value is not available, generate it.
if self._bond_start_topology_index is None:
bond_start_topology_index = 0
for topology_molecule in self._topology.topology_molecules:
if self == topology_molecule:
self._bond_start_topology_index = bond_start_topology_index
break
bond_start_topology_index += topology_molecule.n_bonds
# Return cached value
return self._bond_start_topology_index
def particle(self, index):
"""
Get the TopologyParticle with a given reference molecule index in this TopologyMolecule
Parameters
----------
index : int
Index of the TopologyParticle within this TopologyMolecule to retrieve
Returns
-------
an openff.toolkit.topology.TopologyParticle
"""
return TopologyParticle(self.reference_molecule.particles[index], self)
@property
def particles(self):
"""
        Return an iterator of all the TopologyParticles in this TopologyMolecule
Returns
-------
an iterator of openff.toolkit.topology.TopologyParticle
"""
# Note: This assumes that particles are all Atoms (in topology map order), and then virtualsites
yield_order = list(self._top_to_ref_index.items())
# Sort by topology atom index
yield_order.sort(key=lambda x: x[0])
for top_atom_index, ref_mol_atom_index in yield_order:
ref_atom = self._reference_molecule.atoms[ref_mol_atom_index]
yield TopologyAtom(ref_atom, self)
for vsite in self.reference_molecule.virtual_sites:
tvsite = TopologyVirtualSite(vsite, self)
for vptl in vsite.particles:
yield TopologyVirtualParticle(tvsite, vptl, self)
@property
def n_particles(self):
"""Get the number of particles in this TopologyMolecule
Returns
-------
int : The number of particles
"""
return self._reference_molecule.n_particles
def virtual_site(self, index):
"""
Get the TopologyVirtualSite with a given reference molecule index in this TopologyMolecule
Parameters
----------
index : int
Index of the TopologyVirtualSite within this TopologyMolecule to retrieve
Returns
-------
an openff.toolkit.topology.TopologyVirtualSite
"""
return TopologyVirtualSite(self.reference_molecule.virtual_sites[index], self)
@property
def virtual_sites(self):
"""
        Return an iterator of all the TopologyVirtualSites in this TopologyMolecule
Returns
-------
an iterator of openff.toolkit.topology.TopologyVirtualSite
"""
for vsite in self._reference_molecule.virtual_sites:
yield TopologyVirtualSite(vsite, self)
@property
def n_virtual_sites(self):
"""Get the number of virtual sites in this TopologyMolecule
Returns
-------
int
"""
return self._reference_molecule.n_virtual_sites
@property
def angles(self):
"""Iterable of Tuple[TopologyAtom]: iterator over the angles in this TopologyMolecule."""
return self._convert_to_topology_atom_tuples(self._reference_molecule.angles)
@property
def n_angles(self):
"""int: number of angles in this TopologyMolecule."""
return self._reference_molecule.n_angles
@property
def propers(self):
"""Iterable of Tuple[TopologyAtom]: iterator over the proper torsions in this TopologyMolecule."""
return self._convert_to_topology_atom_tuples(self._reference_molecule.propers)
@property
def n_propers(self):
"""int: number of proper torsions in this TopologyMolecule."""
return self._reference_molecule.n_propers
@property
def impropers(self):
"""Iterable of Tuple[TopologyAtom]: iterator over the possible improper torsions in this TopologyMolecule."""
return self._convert_to_topology_atom_tuples(self._reference_molecule.impropers)
@property
def n_impropers(self):
"""int: number of possible improper torsions in this TopologyMolecule."""
return self._reference_molecule.n_impropers
@property
def smirnoff_impropers(self):
"""
Note that it's possible that a trivalent center will not have an improper assigned.
This will depend on the force field that is used.
Also note that this will return 6 possible atom orderings around each improper
center. In current SMIRNOFF parameterization, three of these six
orderings will be used for the actual assignment of the improper term
and measurement of the angles. These three orderings capture the three unique
angles that could be calculated around the improper center, therefore the sum
of these three terms will always return a consistent energy.
For more details on the use of three-fold ('trefoil') impropers, see
https://open-forcefield-toolkit.readthedocs.io/en/latest/smirnoff.html#impropertorsions
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the indices of atoms making
up a possible improper torsion. The central atom is listed second
in each tuple.
.. todo::
* Offer a way to do the keytransform and get the final 3 orderings in this
method? How can we keep this logic synced up with the parameterization
machinery?
"""
return self._convert_to_topology_atom_tuples(
self._reference_molecule.smirnoff_impropers
)
@property
def amber_impropers(self):
"""
Iterate over improper torsions in the molecule, but only those with
trivalent centers, reporting the central atom first in each improper.
Note that it's possible that a trivalent center will not have an improper assigned.
This will depend on the force field that is used.
Also note that this will return 6 possible atom orderings around each improper
center. In current AMBER parameterization, one of these six
orderings will be used for the actual assignment of the improper term
and measurement of the angle. This method does not encode the logic to
determine which of the six orderings AMBER would use.
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the indices of atoms making
up a possible improper torsion. The central atom is listed first in
each tuple.
"""
return self._convert_to_topology_atom_tuples(
self._reference_molecule.amber_impropers
)
@property
def virtual_site_start_topology_index(self):
"""Get the topology index of the first virtual site in this TopologyMolecule"""
# If the cached value is not available, generate it
if self._virtual_site_start_topology_index is None:
virtual_site_start_topology_index = 0
for topology_molecule in self._topology.topology_molecules:
if self == topology_molecule:
self._virtual_site_start_topology_index = (
virtual_site_start_topology_index
)
virtual_site_start_topology_index += topology_molecule.n_virtual_sites
# Return cached value
return self._virtual_site_start_topology_index
def to_dict(self):
"""Convert to dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
@classmethod
def from_dict(cls, d):
"""Static constructor from dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
def _convert_to_topology_atom_tuples(self, molecule_atom_tuples):
for atom_tuple in molecule_atom_tuples:
mol_atom_indices = (a.molecule_atom_index for a in atom_tuple)
top_mol_atom_indices = (
self._ref_to_top_index[mol_idx] for mol_idx in mol_atom_indices
)
yield tuple(self.atom(i) for i in top_mol_atom_indices)
# TODO: pick back up figuring out how we want TopologyMolecules to know their starting TopologyParticle indices
# =============================================================================================
# Topology
# =============================================================================================
# TODO: Revise documentation and remove chains
class Topology(Serializable):
"""
A Topology is a chemical representation of a system containing one or more molecules appearing in a specified order.
As of the 0.7.0 release, the Topology particle indexing system puts all atoms before all virtualsites.
This ensures that atoms keep the same Topology particle index value, even if the Topology
is modified during system creation by the addition of virtual sites.
.. warning :: This API is experimental and subject to change.
Examples
--------
Import some utilities
>>> from simtk.openmm import app
>>> from openff.toolkit.tests.utils import get_data_file_path, get_packmol_pdb_file_path
>>> pdb_filepath = get_packmol_pdb_file_path('cyclohexane_ethanol_0.4_0.6')
>>> monomer_names = ('cyclohexane', 'ethanol')
Create a Topology object from a PDB file and sdf files defining the molecular contents
>>> from openff.toolkit.topology import Molecule, Topology
>>> pdbfile = app.PDBFile(pdb_filepath)
>>> sdf_filepaths = [get_data_file_path(f'systems/monomers/{name}.sdf') for name in monomer_names]
>>> unique_molecules = [Molecule.from_file(sdf_filepath) for sdf_filepath in sdf_filepaths]
>>> topology = Topology.from_openmm(pdbfile.topology, unique_molecules=unique_molecules)
Create a Topology object from a PDB file and IUPAC names of the molecular contents
>>> pdbfile = app.PDBFile(pdb_filepath)
>>> unique_molecules = [Molecule.from_iupac(name) for name in monomer_names]
>>> topology = Topology.from_openmm(pdbfile.topology, unique_molecules=unique_molecules)
Create an empty Topology object and add a few copies of a single benzene molecule
>>> topology = Topology()
>>> molecule = Molecule.from_iupac('benzene')
>>> molecule_topology_indices = [topology.add_molecule(molecule) for index in range(10)]
"""
def __init__(self, other=None):
"""
Create a new Topology.
Parameters
----------
other : optional, default=None
If specified, attempt to construct a copy of the Topology from the specified object.
This might be a Topology object, or a file that can be used to construct a Topology object
or serialized Topology object.
"""
from openff.toolkit.topology.molecule import FrozenMolecule
# Assign cheminformatics models
model = DEFAULT_AROMATICITY_MODEL
self._aromaticity_model = model
# self._fractional_bond_order_model = DEFAULT_FRACTIONAL_BOND_ORDER_MODEL
# self._charge_model = DEFAULT_CHARGE_MODEL
# Initialize storage
self._initialize()
# TODO: Try to construct Topology copy from `other` if specified
if isinstance(other, Topology):
self.copy_initializer(other)
elif isinstance(other, FrozenMolecule):
self.from_molecules([other])
elif isinstance(other, OrderedDict):
self._initialize_from_dict(other)
def _initialize(self):
"""
Initializes a blank Topology.
"""
self._aromaticity_model = DEFAULT_AROMATICITY_MODEL
self._constrained_atom_pairs = dict()
self._box_vectors = None
# self._reference_molecule_dicts = set()
# TODO: Look into weakref and what it does. Having multiple topologies might cause a memory leak.
self._reference_molecule_to_topology_molecules = OrderedDict()
self._topology_molecules = list()
@property
def reference_molecules(self):
"""
Get an iterator of reference molecules in this Topology.
Returns
-------
iterable of openff.toolkit.topology.Molecule
"""
for ref_mol in self._reference_molecule_to_topology_molecules.keys():
yield ref_mol
@classmethod
def from_molecules(cls, molecules):
"""
Create a new Topology object containing one copy of each of the specified molecule(s).
Parameters
----------
molecules : Molecule or iterable of Molecules
One or more molecules to be added to the Topology
Returns
-------
topology : Topology
The Topology created from the specified molecule(s)
"""
# Ensure that we are working with an iterable
try:
iter(molecules)
except TypeError as te:
# Make iterable object
molecules = [molecules]
# Create Topology and populate it with specified molecules
topology = cls()
for molecule in molecules:
topology.add_molecule(molecule)
return topology
def assert_bonded(self, atom1, atom2):
"""
Raise an exception if the specified atoms are not bonded in the topology.
Parameters
----------
atom1, atom2 : openff.toolkit.topology.Atom or int
The atoms or atom topology indices to check to ensure they are bonded
"""
if (type(atom1) is int) and (type(atom2) is int):
atom1 = self.atom(atom1)
atom2 = self.atom(atom2)
# else:
if not (self.is_bonded(atom1, atom2)):
# TODO: Raise more specific exception.
raise Exception(
"Atoms {} and {} are not bonded in topology".format(atom1, atom2)
)
@property
def aromaticity_model(self):
"""
Get the aromaticity model applied to all molecules in the topology.
Returns
-------
aromaticity_model : str
Aromaticity model in use.
"""
return self._aromaticity_model
@aromaticity_model.setter
def aromaticity_model(self, aromaticity_model):
"""
Set the aromaticity model applied to all molecules in the topology.
Parameters
----------
aromaticity_model : str
Aromaticity model to use. One of: ['OEAroModel_MDL']
"""
if not aromaticity_model in ALLOWED_AROMATICITY_MODELS:
msg = "Aromaticity model must be one of {}; specified '{}'".format(
ALLOWED_AROMATICITY_MODELS, aromaticity_model
)
raise ValueError(msg)
self._aromaticity_model = aromaticity_model
@property
def box_vectors(self):
"""Return the box vectors of the topology, if specified
Returns
-------
box_vectors : simtk.unit.Quantity wrapped numpy array of shape (3, 3)
The unit-wrapped box vectors of this topology
"""
return self._box_vectors
@box_vectors.setter
def box_vectors(self, box_vectors):
"""
Sets the box vectors to be used for this topology.
Parameters
----------
box_vectors : simtk.unit.Quantity wrapped numpy array of shape (3, 3)
The unit-wrapped box vectors
"""
if box_vectors is None:
self._box_vectors = None
return
if not hasattr(box_vectors, "unit"):
raise InvalidBoxVectorsError("Given unitless box vectors")
if not (unit.angstrom.is_compatible(box_vectors.unit)):
raise InvalidBoxVectorsError(
"Attempting to set box vectors in units that are incompatible with simtk.unit.Angstrom"
)
if hasattr(box_vectors, "shape"):
if box_vectors.shape == (3,):
box_vectors *= np.eye(3)
if box_vectors.shape != (3, 3):
raise InvalidBoxVectorsError(
f"Box vectors must be shape (3, 3). Found shape {box_vectors.shape}"
)
elif isinstance(box_vectors._value, list):
if len(box_vectors) == 3:
box_vectors._value *= np.eye(3)
self._box_vectors = box_vectors
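    # --- Illustrative usage sketch (added for clarity; not part of the
    # --- original module): assigning unit-wrapped (3, 3) box vectors makes
    # --- the Topology periodic.
    #
    #   top = Topology()
    #   top.box_vectors = np.eye(3) * 4.0 * unit.nanometer
    #   assert top.is_periodic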
@property
def is_periodic(self):
"""Return whether or not this Topology is intended to be described with periodic
boundary conditions."""
return self.box_vectors is not None
@is_periodic.setter
def is_periodic(self, is_periodic):
"""
        Set whether or not this Topology is intended to be described with periodic boundary conditions.
Parameters
----------
is_periodic : bool
            Whether or not this Topology is periodic
"""
if is_periodic is True and self.box_vectors is None:
raise InvalidPeriodicityError(
"Cannot set is_periodic to True without box vectors. Set box "
"vectors directly instead."
)
if is_periodic is False and self.box_vectors is not None:
raise InvalidPeriodicityError(
"Cannot set is_periodic to False while box vectors are stored. "
"First set box_vectors to None."
)
@property
def charge_model(self):
"""
Get the partial charge model applied to all molecules in the topology.
Returns
-------
charge_model : str
Charge model used for all molecules in the Topology.
"""
return self._charge_model
@charge_model.setter
def charge_model(self, charge_model):
"""
Set the partial charge model used for all molecules in the topology.
Parameters
----------
charge_model : str
Charge model to use for all molecules in the Topology.
Allowed values: ['AM1-BCC']
* ``AM1-BCC``: Canonical AM1-BCC scheme
"""
if not charge_model in ALLOWED_CHARGE_MODELS:
raise ValueError(
"Charge model must be one of {}; specified '{}'".format(
ALLOWED_CHARGE_MODELS, charge_model
)
)
self._charge_model = charge_model
@property
def constrained_atom_pairs(self):
"""Returns the constrained atom pairs of the Topology
Returns
-------
constrained_atom_pairs : dict
dictionary of the form d[(atom1_topology_index, atom2_topology_index)] = distance (float)
"""
return self._constrained_atom_pairs
@property
def fractional_bond_order_model(self):
"""
Get the fractional bond order model for the Topology.
Returns
-------
fractional_bond_order_model : str
Fractional bond order model in use.
"""
return self._fractional_bond_order_model
@fractional_bond_order_model.setter
def fractional_bond_order_model(self, fractional_bond_order_model):
"""
Set the fractional bond order model applied to all molecules in the topology.
Parameters
----------
fractional_bond_order_model : str
Fractional bond order model to use. One of: ['Wiberg']
"""
        if fractional_bond_order_model not in ALLOWED_FRACTIONAL_BOND_ORDER_MODELS:
raise ValueError(
"Fractional bond order model must be one of {}; specified '{}'".format(
ALLOWED_FRACTIONAL_BOND_ORDER_MODELS, fractional_bond_order_model
)
)
self._fractional_bond_order_model = fractional_bond_order_model
@property
def n_reference_molecules(self):
"""
        Returns the number of reference (unique) molecules in this Topology.
Returns
-------
n_reference_molecules : int
"""
        return sum(1 for _ in self.reference_molecules)
@property
def n_topology_molecules(self):
"""
        Returns the number of topology molecules in this Topology.
Returns
-------
n_topology_molecules : int
"""
return len(self._topology_molecules)
@property
def topology_molecules(self):
"""Returns an iterator over all the TopologyMolecules in this Topology
Returns
-------
topology_molecules : Iterable of TopologyMolecule
"""
return self._topology_molecules
@property
def n_topology_atoms(self):
"""
        Returns the number of topology atoms in this Topology.
Returns
-------
n_topology_atoms : int
"""
n_atoms = 0
for reference_molecule in self.reference_molecules:
n_atoms_per_topology_molecule = reference_molecule.n_atoms
n_instances_of_topology_molecule = len(
self._reference_molecule_to_topology_molecules[reference_molecule]
)
n_atoms += n_atoms_per_topology_molecule * n_instances_of_topology_molecule
return n_atoms
@property
def topology_atoms(self):
"""Returns an iterator over the atoms in this Topology. These will be in ascending order of topology index (Note
that this is not necessarily the same as the reference molecule index)
Returns
-------
topology_atoms : Iterable of TopologyAtom
"""
for topology_molecule in self._topology_molecules:
for atom in topology_molecule.atoms:
yield atom
@property
def n_topology_bonds(self):
"""
        Returns the number of TopologyBonds in this Topology.
Returns
-------
n_bonds : int
"""
n_bonds = 0
for reference_molecule in self.reference_molecules:
n_bonds_per_topology_molecule = reference_molecule.n_bonds
n_instances_of_topology_molecule = len(
self._reference_molecule_to_topology_molecules[reference_molecule]
)
n_bonds += n_bonds_per_topology_molecule * n_instances_of_topology_molecule
return n_bonds
@property
def topology_bonds(self):
"""Returns an iterator over the bonds in this Topology
Returns
-------
topology_bonds : Iterable of TopologyBond
"""
for topology_molecule in self._topology_molecules:
for bond in topology_molecule.bonds:
yield bond
@property
def n_topology_particles(self):
"""
        Returns the number of topology particles (TopologyAtoms and TopologyVirtualSites) in this Topology.
Returns
-------
n_topology_particles : int
"""
n_particles = 0
for reference_molecule in self.reference_molecules:
n_particles_per_topology_molecule = reference_molecule.n_particles
n_instances_of_topology_molecule = len(
self._reference_molecule_to_topology_molecules[reference_molecule]
)
n_particles += (
n_particles_per_topology_molecule * n_instances_of_topology_molecule
)
return n_particles
@property
def topology_particles(self):
"""Returns an iterator over the particles (TopologyAtoms and TopologyVirtualSites) in this Topology. The
TopologyAtoms will be in order of ascending Topology index (Note that this may differ from the
order of atoms in the reference molecule index).
Returns
--------
topology_particles : Iterable of TopologyAtom and TopologyVirtualSite
"""
for topology_molecule in self._topology_molecules:
for atom in topology_molecule.atoms:
yield atom
for topology_molecule in self._topology_molecules:
for vs in topology_molecule.virtual_sites:
for vp in vs.particles:
yield vp
@property
def n_topology_virtual_sites(self):
"""
        Returns the number of TopologyVirtualSites in this Topology.
Returns
-------
        n_virtual_sites : int
"""
n_virtual_sites = 0
for reference_molecule in self.reference_molecules:
n_virtual_sites_per_topology_molecule = reference_molecule.n_virtual_sites
n_instances_of_topology_molecule = len(
self._reference_molecule_to_topology_molecules[reference_molecule]
)
n_virtual_sites += (
n_virtual_sites_per_topology_molecule * n_instances_of_topology_molecule
)
return n_virtual_sites
@property
def topology_virtual_sites(self):
"""Get an iterator over the virtual sites in this Topology
Returns
-------
topology_virtual_sites : Iterable of TopologyVirtualSite
"""
for topology_molecule in self._topology_molecules:
for virtual_site in topology_molecule.virtual_sites:
yield virtual_site
@property
def n_angles(self):
"""int: number of angles in this Topology."""
return sum(mol.n_angles for mol in self._topology_molecules)
@property
def angles(self):
"""Iterable of Tuple[TopologyAtom]: iterator over the angles in this Topology."""
for topology_molecule in self._topology_molecules:
for angle in topology_molecule.angles:
yield angle
@property
def n_propers(self):
"""int: number of proper torsions in this Topology."""
return sum(mol.n_propers for mol in self._topology_molecules)
@property
def propers(self):
"""Iterable of Tuple[TopologyAtom]: iterator over the proper torsions in this Topology."""
for topology_molecule in self._topology_molecules:
for proper in topology_molecule.propers:
yield proper
@property
def n_impropers(self):
"""int: number of possible improper torsions in this Topology."""
return sum(mol.n_impropers for mol in self._topology_molecules)
@property
def impropers(self):
"""Iterable of Tuple[TopologyAtom]: iterator over the possible improper torsions in this Topology."""
for topology_molecule in self._topology_molecules:
for improper in topology_molecule.impropers:
yield improper
@property
def smirnoff_impropers(self):
"""
Iterate over improper torsions in the molecule, but only those with
trivalent centers, reporting the central atom second in each improper.
Note that it's possible that a trivalent center will not have an improper assigned.
This will depend on the force field that is used.
Also note that this will return 6 possible atom orderings around each improper
center. In current SMIRNOFF parameterization, three of these six
orderings will be used for the actual assignment of the improper term
and measurement of the angles. These three orderings capture the three unique
angles that could be calculated around the improper center, therefore the sum
of these three terms will always return a consistent energy.
For more details on the use of three-fold ('trefoil') impropers, see
https://open-forcefield-toolkit.readthedocs.io/en/latest/smirnoff.html#impropertorsions
.. todo:: Offer a way to do the keytransform and get the final 3 orderings in this
method? How can we keep this logic synced up with the parameterization
machinery?
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the indices of atoms making
up a possible improper torsion. The central atom is listed second
in each tuple.
See Also
--------
impropers, amber_impropers
"""
for topology_molecule in self._topology_molecules:
for smirnoff_improper in topology_molecule.smirnoff_impropers:
yield smirnoff_improper
@property
def amber_impropers(self):
"""
Iterate over improper torsions in the molecule, but only those with
trivalent centers, reporting the central atom first in each improper.
Note that it's possible that a trivalent center will not have an improper assigned.
This will depend on the force field that is used.
Also note that this will return 6 possible atom orderings around each improper
center. In current AMBER parameterization, one of these six
orderings will be used for the actual assignment of the improper term
and measurement of the angle. This method does not encode the logic to
determine which of the six orderings AMBER would use.
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the indices of atoms making
up a possible improper torsion. The central atom is listed first in
each tuple.
See Also
--------
impropers, smirnoff_impropers
"""
for topology_molecule in self._topology_molecules:
for amber_improper in topology_molecule.amber_impropers:
yield amber_improper
class _ChemicalEnvironmentMatch:
"""Represents the match of a given chemical environment query, storing
both the matched topology atom indices and the indices of the corresponding
reference molecule atoms, as well as a reference to the reference molecule.
"""
@property
def reference_atom_indices(self):
"""tuple of int: The indices of the corresponding reference molecule atoms."""
return self._reference_atom_indices
@property
def reference_molecule(self):
"""topology.molecule.Molecule: The corresponding reference molecule."""
return self._reference_molecule
@property
def topology_atom_indices(self):
"""tuple of int: The matched topology atom indices."""
return self._topology_atom_indices
def __init__(
self, reference_atom_indices, reference_molecule, topology_atom_indices
):
"""Constructs a new _ChemicalEnvironmentMatch object
Parameters
----------
reference_atom_indices: tuple of int
The indices of the corresponding reference molecule atoms.
reference_molecule: topology.molecule.Molecule
The corresponding reference molecule.
topology_atom_indices: tuple of int
The matched topology atom indices.
"""
assert len(reference_atom_indices) == len(topology_atom_indices)
self._reference_atom_indices = reference_atom_indices
self._reference_molecule = reference_molecule
self._topology_atom_indices = topology_atom_indices
def chemical_environment_matches(
self, query, aromaticity_model="MDL", toolkit_registry=GLOBAL_TOOLKIT_REGISTRY
):
"""
Retrieve all matches for a given chemical environment query.
TODO:
* Do we want to generalize this to other kinds of queries too, like mdtraj DSL, pymol selections, atom index slices, etc?
We could just call it topology.matches(query)
Parameters
----------
query : str or ChemicalEnvironment
SMARTS string (with one or more tagged atoms) or ``ChemicalEnvironment`` query
Query will internally be resolved to SMARTS using ``query.as_smarts()`` if it has an ``.as_smarts`` method.
aromaticity_model : str
Override the default aromaticity model for this topology and use the specified aromaticity model instead.
Allowed values: ['MDL']
Returns
-------
matches : list of Topology._ChemicalEnvironmentMatch
A list of tuples, containing the topology indices of the matching atoms.
"""
# Render the query to a SMARTS string
if type(query) is str:
smarts = query
elif type(query) is ChemicalEnvironment:
smarts = query.as_smarts()
else:
raise ValueError(
f"Don't know how to convert query '{query}' into SMARTS string"
)
# Perform matching on each unique molecule, unrolling the matches to all matching copies
# of that molecule in the Topology object.
matches = list()
for ref_mol in self.reference_molecules:
# Find all atomsets that match this definition in the reference molecule
# This will automatically attempt to match chemically identical atoms in
# a canonical order within the Topology
ref_mol_matches = ref_mol.chemical_environment_matches(
smarts, toolkit_registry=toolkit_registry
)
if len(ref_mol_matches) == 0:
continue
# Unroll corresponding atom indices over all instances of this molecule.
for topology_molecule in self._reference_molecule_to_topology_molecules[
ref_mol
]:
# topology_molecule_start_index = topology_molecule.atom_start_topology_index
# Loop over matches
for reference_match in ref_mol_matches:
# Collect indices of matching TopologyAtoms.
topology_atom_indices = []
for reference_molecule_atom_index in reference_match:
reference_atom = topology_molecule.reference_molecule.atoms[
reference_molecule_atom_index
]
topology_atom = TopologyAtom(reference_atom, topology_molecule)
topology_atom_indices.append(
topology_atom.topology_particle_index
)
environment_match = Topology._ChemicalEnvironmentMatch(
tuple(reference_match), ref_mol, tuple(topology_atom_indices)
)
matches.append(environment_match)
return matches
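    # Illustrative usage sketch (not part of the original source): match every
    # carbon-bonded hydrogen across all copies of a molecule in the Topology.
    # Assumes ``ethanol`` is an OpenFF Molecule built elsewhere.
    #
    #     topology = Topology()
    #     topology.add_molecule(ethanol)
    #     topology.add_molecule(ethanol)
    #     matches = topology.chemical_environment_matches("[#6:1]-[#1:2]")
    #     # each match carries topology-wide indices of the tagged atoms
    #     first_pair = matches[0].topology_atom_indices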
def to_dict(self):
"""Convert to dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
@classmethod
def from_dict(cls, d):
"""Static constructor from dictionary representation."""
# Implement abstract method Serializable.to_dict()
raise NotImplementedError() # TODO
# TODO: Merge this into Molecule.from_networkx if/when we implement that.
# TODO: can we now remove this as we have the ability to do this in the Molecule class?
@staticmethod
def _networkx_to_hill_formula(mol_graph):
"""
Convert a networkX representation of a molecule to a molecule formula. Used in printing out
informative error messages when a molecule from an openmm topology can't be matched.
Parameters
----------
mol_graph : a networkX graph
The graph representation of a molecule
Returns
-------
formula : str
The molecular formula of the graph molecule
"""
from simtk.openmm.app import Element
# Make a flat list of all atomic numbers in the molecule
atom_nums = []
for idx in mol_graph.nodes:
atom_nums.append(mol_graph.nodes[idx]["atomic_number"])
# Count the number of instances of each atomic number
at_num_to_counts = dict([(unq, atom_nums.count(unq)) for unq in atom_nums])
symbol_to_counts = {}
# Check for C and H first, to make a correct hill formula (remember dicts in python 3.6+ are ordered)
if 6 in at_num_to_counts:
symbol_to_counts["C"] = at_num_to_counts[6]
del at_num_to_counts[6]
if 1 in at_num_to_counts:
symbol_to_counts["H"] = at_num_to_counts[1]
del at_num_to_counts[1]
# Now count instances of all elements other than C and H, in order of ascending atomic number
sorted_atom_nums = sorted(at_num_to_counts.keys())
for atom_num in sorted_atom_nums:
symbol_to_counts[
Element.getByAtomicNumber(atom_num).symbol
] = at_num_to_counts[atom_num]
# Finally format the formula as string
formula = ""
for ele, count in symbol_to_counts.items():
formula += f"{ele}{count}"
return formula
@classmethod
def from_openmm(cls, openmm_topology, unique_molecules=None):
"""
Construct an OpenFF Topology object from an OpenMM Topology object.
Parameters
----------
openmm_topology : simtk.openmm.app.Topology
An OpenMM Topology object
unique_molecules : iterable of objects that can be used to construct unique Molecule objects
All unique molecules must be provided, in any order, though multiple copies of each molecule are allowed.
The atomic elements and bond connectivity will be used to match the reference molecules
to molecule graphs appearing in the OpenMM ``Topology``. If bond orders are present in the
OpenMM ``Topology``, these will be used in matching as well.
Returns
-------
topology : openff.toolkit.topology.Topology
An OpenFF Topology object
"""
import networkx as nx
from openff.toolkit.topology.molecule import Molecule
# Check to see if the openMM system has defined bond orders, by looping over all Bonds in the Topology.
omm_has_bond_orders = True
for omm_bond in openmm_topology.bonds():
if omm_bond.order is None:
omm_has_bond_orders = False
if unique_molecules is None:
raise MissingUniqueMoleculesError(
"Topology.from_openmm requires a list of Molecule objects "
"passed as unique_molecules, but None was passed."
)
# Convert all unique mols to graphs
topology = cls()
graph_to_unq_mol = {}
for unq_mol in unique_molecules:
unq_mol_graph = unq_mol.to_networkx()
for existing_graph in graph_to_unq_mol.keys():
if Molecule.are_isomorphic(
existing_graph,
unq_mol_graph,
return_atom_map=False,
aromatic_matching=False,
formal_charge_matching=False,
bond_order_matching=omm_has_bond_orders,
atom_stereochemistry_matching=False,
bond_stereochemistry_matching=False,
)[0]:
msg = (
"Error: Two unique molecules have indistinguishable "
"graphs: {} and {}".format(
unq_mol, graph_to_unq_mol[existing_graph]
)
)
raise DuplicateUniqueMoleculeError(msg)
graph_to_unq_mol[unq_mol_graph] = unq_mol
# Convert all openMM mols to graphs
omm_topology_G = nx.Graph()
for atom in openmm_topology.atoms():
omm_topology_G.add_node(
atom.index, atomic_number=atom.element.atomic_number
)
for bond in openmm_topology.bonds():
omm_topology_G.add_edge(
bond.atom1.index, bond.atom2.index, bond_order=bond.order
)
# For each connected subgraph (molecule) in the topology, find its match in unique_molecules
topology_molecules_to_add = list()
for omm_mol_G in (
omm_topology_G.subgraph(c).copy()
for c in nx.connected_components(omm_topology_G)
):
match_found = False
for unq_mol_G in graph_to_unq_mol.keys():
isomorphic, mapping = Molecule.are_isomorphic(
omm_mol_G,
unq_mol_G,
return_atom_map=True,
aromatic_matching=False,
formal_charge_matching=False,
bond_order_matching=omm_has_bond_orders,
atom_stereochemistry_matching=False,
bond_stereochemistry_matching=False,
)
if isomorphic:
# Take the first valid atom indexing map
first_topology_atom_index = min(mapping.keys())
topology_molecules_to_add.append(
(first_topology_atom_index, unq_mol_G, mapping.items())
)
match_found = True
break
if match_found is False:
hill_formula = Molecule.to_hill_formula(omm_mol_G)
msg = f"No match found for molecule {hill_formula}. "
probably_missing_conect = [
"C",
"H",
"O",
"N",
"P",
"S",
"F",
"Cl",
"Br",
]
if hill_formula in probably_missing_conect:
msg += (
"This would be a very unusual molecule to try and parameterize, "
"and it is likely that the data source it was read from does not "
"contain connectivity information. If this molecule is coming from "
"PDB, please ensure that the file contains CONECT records. The PDB "
"format documentation (https://www.wwpdb.org/documentation/"
'file-format-content/format33/sect10.html) states "CONECT records '
"are mandatory for HET groups (excluding water) and for other bonds "
'not specified in the standard residue connectivity table."'
)
raise ValueError(msg)
        # Iterating over connected components above may have scrambled the molecule
        # order, so sort molecules by their first atom's topology index
topology_molecules_to_add.sort(key=lambda x: x[0])
for first_index, unq_mol_G, top_to_ref_index in topology_molecules_to_add:
local_top_to_ref_index = dict(
[
(top_index - first_index, ref_index)
for top_index, ref_index in top_to_ref_index
]
)
topology.add_molecule(
graph_to_unq_mol[unq_mol_G],
local_topology_to_reference_index=local_top_to_ref_index,
)
topology.box_vectors = openmm_topology.getPeriodicBoxVectors()
# TODO: How can we preserve metadata from the openMM topology when creating the OFF topology?
return topology
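    # Illustrative usage sketch (not part of the original source): rebuilding an OpenFF
    # Topology from an OpenMM Topology read from a PDB file. ``pdb_path`` and the ligand
    # SMILES variable are hypothetical placeholders.
    #
    #     from simtk.openmm.app import PDBFile
    #     pdbfile = PDBFile(pdb_path)
    #     water = Molecule.from_smiles("O")
    #     ligand = Molecule.from_smiles(ligand_smiles)
    #     off_topology = Topology.from_openmm(
    #         pdbfile.topology, unique_molecules=[water, ligand]
    #     )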
def to_openmm(self, ensure_unique_atom_names=True):
"""
Create an OpenMM Topology object.
The OpenMM ``Topology`` object will have one residue per topology
molecule. Currently, the number of chains depends on how many copies
of the same molecule are in the ``Topology``. Molecules with more
than 5 copies are all assigned to a single chain, otherwise one
chain is created for each molecule. This behavior may change in
the future.
Parameters
----------
ensure_unique_atom_names : bool, optional. Default=True
            Whether to check that the atoms in each molecule have
unique atom names, and regenerate them if not. Note that this
looks only at molecules, and does not guarantee uniqueness in
the entire Topology.
Returns
-------
openmm_topology : simtk.openmm.app.Topology
An OpenMM Topology object
"""
from simtk.openmm.app import Aromatic, Double, Single
from simtk.openmm.app import Topology as OMMTopology
from simtk.openmm.app import Triple
from simtk.openmm.app.element import Element as OMMElement
omm_topology = OMMTopology()
# Create unique atom names
if ensure_unique_atom_names:
for ref_mol in self.reference_molecules:
if not ref_mol.has_unique_atom_names:
ref_mol.generate_unique_atom_names()
# Keep track of which chains and residues have been added.
mol_to_chains = {}
mol_to_residues = {}
# Go through atoms in OpenFF to preserve the order.
omm_atoms = []
# We need to iterate over the topology molecules if we want to
# keep track of chains/residues as Atom.topology_molecule is
# instantiated every time and can't be used as a key.
for topology_molecule in self.topology_molecules:
for atom in topology_molecule.atoms:
reference_molecule = topology_molecule.reference_molecule
n_molecules = len(
self._reference_molecule_to_topology_molecules[reference_molecule]
)
# Add 1 chain per molecule unless there are more than 5 copies,
# in which case we add a single chain for all of them.
if n_molecules <= 5:
# We associate a chain to each molecule.
key_molecule = topology_molecule
else:
                    # We associate a single chain to all copies of this molecule.
key_molecule = reference_molecule
                # Create a new chain if it doesn't exist.
try:
chain = mol_to_chains[key_molecule]
except KeyError:
chain = omm_topology.addChain()
mol_to_chains[key_molecule] = chain
                # Add one residue for each topology molecule.
try:
residue = mol_to_residues[topology_molecule]
except KeyError:
residue = omm_topology.addResidue(reference_molecule.name, chain)
mol_to_residues[topology_molecule] = residue
# Add atom.
element = OMMElement.getByAtomicNumber(atom.atomic_number)
omm_atom = omm_topology.addAtom(atom.atom.name, element, residue)
# Make sure that OpenFF and OpenMM Topology atoms have the same indices.
assert atom.topology_atom_index == int(omm_atom.id) - 1
omm_atoms.append(omm_atom)
# Add all bonds.
bond_types = {1: Single, 2: Double, 3: Triple}
for bond in self.topology_bonds:
atom1, atom2 = bond.atoms
atom1_idx, atom2_idx = atom1.topology_atom_index, atom2.topology_atom_index
bond_type = (
Aromatic if bond.bond.is_aromatic else bond_types[bond.bond_order]
)
omm_topology.addBond(
omm_atoms[atom1_idx],
omm_atoms[atom2_idx],
type=bond_type,
order=bond.bond_order,
)
if self.box_vectors is not None:
omm_topology.setPeriodicBoxVectors(self.box_vectors)
return omm_topology
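    # Illustrative usage sketch (not part of the original source): converting back to
    # OpenMM; one residue is created per topology molecule as described above.
    #
    #     omm_topology = off_topology.to_openmm(ensure_unique_atom_names=True)
    #     print(omm_topology.getNumAtoms(), omm_topology.getNumResidues())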
def to_file(self, filename, positions, file_format="PDB", keepIds=False):
"""
Save coordinates and topology to a PDB file.
Reference: https://github.com/openforcefield/openff-toolkit/issues/502
Notes:
1. This doesn't handle virtual sites (they're ignored)
        2. Atom numbering may not remain the same; for example, if the atoms
           in water are numbered 1001, 1002, 1003, they would change
           to 1, 2, 3. This doesn't affect the topology, coordinates or
           atom ordering in any way.
        3. Likewise, the amino acid (residue) names from the source file are
           not preserved in the output.
Parameters
----------
filename : str
name of the pdb file to write to
        positions : n_atoms x 3 numpy array or simtk.unit.Quantity-wrapped n_atoms x 3 iterable
            Can be an OpenMM ``Quantity`` object holding atomic positions as a list of Vec3s with associated units; otherwise a 3D array of unitless numbers is interpreted as being in Angstroms by default
        file_format : str
            Output file format. Case insensitive. Currently the only supported value is "PDB".
        keepIds : bool, optional, default=False
            If True, keep the residue and chain IDs from this Topology rather than generating new ones (passed through to ``PDBFile.writeFile``).
        """
from simtk.openmm.app import PDBFile
from simtk.unit import Quantity, angstroms
openmm_top = self.to_openmm()
if not isinstance(positions, Quantity):
positions = positions * angstroms
file_format = file_format.upper()
if file_format != "PDB":
raise NotImplementedError("Topology.to_file supports only PDB format")
# writing to PDB file
with open(filename, "w") as outfile:
PDBFile.writeFile(openmm_top, positions, outfile, keepIds)
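    # Illustrative usage sketch (not part of the original source): writing a PDB using
    # positions taken from an OpenMM PDBFile object (already unit-wrapped).
    #
    #     off_topology.to_file("system.pdb", pdbfile.positions)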
@staticmethod
def from_mdtraj(mdtraj_topology, unique_molecules=None):
"""
Construct an OpenFF Topology object from an MDTraj Topology object.
Parameters
----------
mdtraj_topology : mdtraj.Topology
An MDTraj Topology object
unique_molecules : iterable of objects that can be used to construct unique Molecule objects
All unique molecules must be provided, in any order, though multiple copies of each molecule are allowed.
The atomic elements and bond connectivity will be used to match the reference molecules
to molecule graphs appearing in the MDTraj ``Topology``. If bond orders are present in the
MDTraj ``Topology``, these will be used in matching as well.
Returns
-------
topology : openff.toolkit.topology.Topology
An OpenFF Topology object
"""
return Topology.from_openmm(
mdtraj_topology.to_openmm(), unique_molecules=unique_molecules
)
# TODO: Jeff prepended an underscore on this before 0.2.0 release to remove it from the API.
# Before exposing this, we should look carefully at the information that is preserved/lost during this
# conversion, and make it clear what would happen to this information in a round trip. For example,
# we should know what would happen to formal and partial bond orders and charges, stereochemistry, and
# multi-conformer information. It will be important to document these risks to users, as all of these
# factors could lead to unintended behavior during system parameterization.
def _to_mdtraj(self):
"""
Create an MDTraj Topology object.
Returns
----------
mdtraj_topology : mdtraj.Topology
An MDTraj Topology object
#"""
import mdtraj as md
return md.Topology.from_openmm(self.to_openmm())
@staticmethod
def from_parmed(parmed_structure, unique_molecules=None):
"""
.. warning:: This functionality will be implemented in a future toolkit release.
Construct an OpenFF Topology object from a ParmEd Structure object.
Parameters
----------
parmed_structure : parmed.Structure
A ParmEd structure object
unique_molecules : iterable of objects that can be used to construct unique Molecule objects
All unique molecules must be provided, in any order, though multiple copies of each molecule are allowed.
The atomic elements and bond connectivity will be used to match the reference molecules
to molecule graphs appearing in the structure's ``topology`` object. If bond orders are present in the
structure's ``topology`` object, these will be used in matching as well.
Returns
-------
topology : openff.toolkit.topology.Topology
An OpenFF Topology object
"""
# TODO: Implement functionality
raise NotImplementedError
def to_parmed(self):
"""
.. warning:: This functionality will be implemented in a future toolkit release.
Create a ParmEd Structure object.
Returns
----------
parmed_structure : parmed.Structure
            A ParmEd Structure object
"""
# TODO: Implement functionality
raise NotImplementedError
# TODO: Jeff prepended an underscore on this before 0.2.0 release to remove it from the API.
# This function is deprecated and expects the OpenEye toolkit. We need to discuss what
# to do with this functionality in light of our move to the ToolkitWrapper architecture.
# Also, as written, this function implies several things about our toolkit's ability to
# handle biopolymers. We need to discuss how much of this functionality we will expose
    # and how we can make our toolkit's current scope clear to users.
@staticmethod
def _from_openeye(oemol):
"""
Create a Molecule from an OpenEye molecule.
Requires the OpenEye toolkit to be installed.
Parameters
----------
oemol : openeye.oechem.OEMol
An OpenEye molecule
Returns
-------
molecule : openff.toolkit.topology.Molecule
An OpenFF molecule
"""
# TODO: Convert this to cls.from_molecules(Molecule.from_openeye())?
# OE Hierarchical molecule view
hv = oechem.OEHierView(
oemol,
oechem.OEAssumption_BondedResidue
+ oechem.OEAssumption_ResPerceived
+ oechem.OEAssumption_PDBOrder,
)
# Create empty OpenMM Topology
topology = app.Topology()
# Dictionary used to map oe atoms to openmm atoms
oe_atom_to_openmm_at = {}
for chain in hv.GetChains():
# TODO: Fail if hv contains more than one molecule.
# Create empty OpenMM Chain
openmm_chain = topology.addChain(chain.GetChainID())
for frag in chain.GetFragments():
for hres in frag.GetResidues():
# Get OE residue
oe_res = hres.GetOEResidue()
# Create OpenMM residue
openmm_res = topology.addResidue(oe_res.GetName(), openmm_chain)
for oe_at in hres.GetAtoms():
# Select atom element based on the atomic number
element = app.element.Element.getByAtomicNumber(
oe_at.GetAtomicNum()
)
                        # Add the OpenMM atom to the topology
openmm_at = topology.addAtom(
oe_at.GetName(), element, openmm_res
)
openmm_at.index = oe_at.GetIdx()
# Add atom to the mapping dictionary
oe_atom_to_openmm_at[oe_at] = openmm_at
        if topology.getNumAtoms() != oemol.NumAtoms():
            oechem.OEThrow.Error(
                "OpenMM topology and OEMol number of atoms mismatching: "
                "OpenMM = {} vs OEMol = {}".format(
                    topology.getNumAtoms(), oemol.NumAtoms()
                )
            )
# Count the number of bonds in the openmm topology
omm_bond_count = 0
def IsAmideBond(oe_bond):
# TODO: Can this be replaced by a SMARTS query?
            # This supporting function checks whether the passed bond is an amide bond.
            # Our definition of an amide bond (C-N between a carbon and a nitrogen atom) is:
            #           O
            #           ║
            #   CA or O-C-N-
            #              |
            # The amide bond C-N is a single bond
if oe_bond.GetOrder() != 1:
return False
atomB = oe_bond.GetBgn()
atomE = oe_bond.GetEnd()
# The amide bond is made by Carbon and Nitrogen atoms
if not (
atomB.IsCarbon()
and atomE.IsNitrogen()
or (atomB.IsNitrogen() and atomE.IsCarbon())
):
return False
# Select Carbon and Nitrogen atoms
if atomB.IsCarbon():
C_atom = atomB
N_atom = atomE
else:
C_atom = atomE
N_atom = atomB
# Carbon and Nitrogen atoms must have 3 neighbour atoms
if not (C_atom.GetDegree() == 3 and N_atom.GetDegree() == 3):
return False
double_bonds = 0
single_bonds = 0
for bond in C_atom.GetBonds():
# The C-O bond can be single or double.
if (bond.GetBgn() == C_atom and bond.GetEnd().IsOxygen()) or (
bond.GetBgn().IsOxygen() and bond.GetEnd() == C_atom
):
if bond.GetOrder() == 2:
double_bonds += 1
if bond.GetOrder() == 1:
single_bonds += 1
# The CA-C bond is single
if (bond.GetBgn() == C_atom and bond.GetEnd().IsCarbon()) or (
bond.GetBgn().IsCarbon() and bond.GetEnd() == C_atom
):
if bond.GetOrder() == 1:
single_bonds += 1
            # Exactly one double bond (C=O) and one single bond attached to the carbon
            # indicate an amide bond
            return double_bonds == 1 and single_bonds == 1
# Creating bonds
        for oe_bond in oemol.GetBonds():
# Set the bond type
if oe_bond.GetType() != "":
if oe_bond.GetType() in [
"Single",
"Double",
"Triple",
"Aromatic",
"Amide",
]:
off_bondtype = oe_bond.GetType()
else:
off_bondtype = None
else:
if oe_bond.IsAromatic():
oe_bond.SetType("Aromatic")
off_bondtype = "Aromatic"
elif oe_bond.GetOrder() == 2:
oe_bond.SetType("Double")
off_bondtype = "Double"
elif oe_bond.GetOrder() == 3:
oe_bond.SetType("Triple")
off_bondtype = "Triple"
elif IsAmideBond(oe_bond):
oe_bond.SetType("Amide")
off_bondtype = "Amide"
elif oe_bond.GetOrder() == 1:
oe_bond.SetType("Single")
off_bondtype = "Single"
else:
off_bondtype = None
            # NOTE: the original code referenced an undefined ``molecule`` here; adding
            # the bond to the OpenMM topology being built (and counting it) is assumed.
            topology.addBond(
                oe_atom_to_openmm_at[oe_bond.GetBgn()],
                oe_atom_to_openmm_at[oe_bond.GetEnd()],
                type=off_bondtype,
                order=oe_bond.GetOrder(),
            )
            omm_bond_count += 1
        if omm_bond_count != oemol.NumBonds():
            oechem.OEThrow.Error(
                "OpenMM topology and OEMol number of bonds mismatching: "
                "OpenMM = {} vs OEMol = {}".format(omm_bond_count, oemol.NumBonds())
            )
        dic = oemol.GetCoords()
positions = [Vec3(v[0], v[1], v[2]) for k, v in dic.items()] * unit.angstrom
return topology, positions
# TODO: Jeff prepended an underscore on this before 0.2.0 release to remove it from the API.
# This function is deprecated and expects the OpenEye toolkit. We need to discuss what
# to do with this functionality in light of our move to the ToolkitWrapper architecture.
# It also expects Topology to be organized by chain, which is not currently the case.
# Bringing this function back would require non-trivial engineering and testing, and we
# would want to discuss what "guarantee" of correctness it could offer.
def _to_openeye(self, positions=None, aromaticity_model=DEFAULT_AROMATICITY_MODEL):
"""
Create an OpenEye OEMol from the topology
Requires the OpenEye toolkit to be installed.
Returns
-------
oemol : openeye.oechem.OEMol
An OpenEye molecule
positions : simtk.unit.Quantity with shape [nparticles,3], optional, default=None
Positions to use in constructing OEMol.
If virtual sites are present in the Topology, these indices will be skipped.
NOTE: This comes from https://github.com/oess/oeommtools/blob/master/oeommtools/utils.py
"""
        oe_mol = oechem.OEMol()
        # Dictionary used to map OpenMM atoms to the OEAtoms created below
        # (assumed; the original code used this mapping without defining it)
        openmm_atom_to_oe_atom = {}
        # Python set used to identify atoms that are not in protein residues
        keep = set(proteinResidues).union(dnaResidues).union(rnaResidues)
        # NOTE: the original code iterated over an undefined ``topology``; an
        # OpenMM-style, chain-organized topology derived from self is assumed here.
        openmm_topology = self.to_openmm()
        for chain in openmm_topology.chains():
for res in chain.residues():
# Create an OEResidue
oe_res = oechem.OEResidue()
# Set OEResidue name
oe_res.SetName(res.name)
# If the atom is not a protein atom then set its heteroatom
# flag to True
if res.name not in keep:
oe_res.SetFragmentNumber(chain.index + 1)
oe_res.SetHetAtom(True)
# Set OEResidue Chain ID
oe_res.SetChainID(chain.id)
# res_idx = int(res.id) - chain.index * len(chain._residues)
# Set OEResidue number
oe_res.SetResidueNumber(int(res.id))
for openmm_at in res.atoms():
# Create an OEAtom based on the atomic number
oe_atom = oe_mol.NewAtom(openmm_at.element._atomic_number)
# Set atom name
oe_atom.SetName(openmm_at.name)
# Set Symbol
oe_atom.SetType(openmm_at.element.symbol)
# Set Atom index
oe_res.SetSerialNumber(openmm_at.index + 1)
# Commit the changes
oechem.OEAtomSetResidue(oe_atom, oe_res)
# Update the dictionary OpenMM to OE
openmm_atom_to_oe_atom[openmm_at] = oe_atom
        if self.n_topology_atoms != oe_mol.NumAtoms():
            raise Exception(
                "OEMol has an unexpected number of atoms: "
                "Topology has {} atoms, while OEMol has {} atoms".format(
                    self.n_topology_atoms, oe_mol.NumAtoms()
                )
            )
# Create bonds
        # NOTE: ``oe_atoms`` below is assumed to map TopologyAtoms to the OEAtoms
        # created above; the original (deprecated) code never built that mapping.
        for off_bond in self.topology_bonds:
            atom1, atom2 = off_bond.atoms
            oe_bond = oe_mol.NewBond(oe_atoms[atom1], oe_atoms[atom2], off_bond.bond_order)
            if off_bond.type:
                if off_bond.type == "Aromatic":
                    oe_bond.GetBgn().SetAromatic(True)
                    oe_bond.GetEnd().SetAromatic(True)
                    oe_bond.SetAromatic(True)
                    oe_bond.SetType("Aromatic")
                elif off_bond.type in ["Single", "Double", "Triple", "Amide"]:
                    oe_bond.SetType(off_bond.type)
                else:
                    oe_bond.SetType("")
        if self.n_topology_bonds != oe_mol.NumBonds():
            oechem.OEThrow.Error(
                "OEMol has an unexpected number of bonds: "
                "Topology has {} bonds, while OEMol has {} bonds".format(
                    self.n_topology_bonds, oe_mol.NumBonds()
                )
            )
if positions is not None:
# Set the OEMol positions
particle_indices = [
atom.particle_index for atom in self.topology_atoms
] # get particle indices
pos = positions[particle_indices].value_in_units_of(unit.angstrom)
pos = list(itertools.chain.from_iterable(pos))
oe_mol.SetCoords(pos)
oechem.OESetDimensionFromCoords(oe_mol)
return oe_mol
def get_bond_between(self, i, j):
"""Returns the bond between two atoms
Parameters
----------
i, j : int or TopologyAtom
Atoms or atom indices to check
Returns
-------
bond : TopologyBond
The bond between i and j.
"""
if (type(i) is int) and (type(j) is int):
atomi = self.atom(i)
atomj = self.atom(j)
elif (type(i) is TopologyAtom) and (type(j) is TopologyAtom):
atomi = i
atomj = j
else:
raise Exception(
"Invalid input passed to is_bonded(). Expected ints or TopologyAtoms, "
"got {} and {}".format(i, j)
)
for top_bond in atomi.topology_bonds:
for top_atom in top_bond.atoms:
if top_atom == atomi:
continue
if top_atom == atomj:
return top_bond
raise NotBondedError("No bond between atom {} and {}".format(i, j))
def is_bonded(self, i, j):
"""Returns True if the two atoms are bonded
Parameters
----------
i, j : int or TopologyAtom
Atoms or atom indices to check
Returns
-------
is_bonded : bool
True if atoms are bonded, False otherwise.
"""
try:
self.get_bond_between(i, j)
return True
except NotBondedError:
return False
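    # Illustrative usage sketch (not part of the original source): both helpers accept
    # either topology atom indices or TopologyAtom objects.
    #
    #     if topology.is_bonded(0, 1):
    #         bond = topology.get_bond_between(0, 1)
    #         print(bond.bond_order)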
def atom(self, atom_topology_index):
"""
Get the TopologyAtom at a given Topology atom index.
Parameters
----------
atom_topology_index : int
The index of the TopologyAtom in this Topology
Returns
-------
An openff.toolkit.topology.TopologyAtom
"""
assert type(atom_topology_index) is int
assert 0 <= atom_topology_index < self.n_topology_atoms
this_molecule_start_index = 0
next_molecule_start_index = 0
for topology_molecule in self._topology_molecules:
next_molecule_start_index += topology_molecule.n_atoms
if next_molecule_start_index > atom_topology_index:
atom_molecule_index = atom_topology_index - this_molecule_start_index
# NOTE: the index here should still be in the topology index order, NOT the reference molecule's
return topology_molecule.atom(atom_molecule_index)
this_molecule_start_index += topology_molecule.n_atoms
# Potentially more computationally efficient lookup ( O(largest_molecule_natoms)? )
# start_index_2_top_mol is an ordered dict of [starting_atom_index] --> [topology_molecule]
# search_range = range(atom_topology_index - largest_molecule_natoms, atom_topology_index)
# search_index = atom_topology_index
# while not(search_index in start_index_2_top_mol.keys()): # Only efficient if start_index_2_top_mol.keys() is a set (constant time lookups)
# search_index -= 1
# topology_molecule = start_index_2_top_mol(search_index)
# atom_molecule_index = atom_topology_index - search_index
# return topology_molecule.atom(atom_molecule_index)
def virtual_site(self, vsite_topology_index):
"""
        Get the TopologyVirtualSite at a given Topology virtual site index.
Parameters
----------
vsite_topology_index : int
The index of the TopologyVirtualSite in this Topology
Returns
-------
An openff.toolkit.topology.TopologyVirtualSite
"""
assert type(vsite_topology_index) is int
assert 0 <= vsite_topology_index < self.n_topology_virtual_sites
this_molecule_start_index = 0
next_molecule_start_index = 0
for topology_molecule in self._topology_molecules:
next_molecule_start_index += topology_molecule.n_virtual_sites
if next_molecule_start_index > vsite_topology_index:
vsite_molecule_index = vsite_topology_index - this_molecule_start_index
return topology_molecule.virtual_site(vsite_molecule_index)
this_molecule_start_index += topology_molecule.n_virtual_sites
def bond(self, bond_topology_index):
"""
Get the TopologyBond at a given Topology bond index.
Parameters
----------
bond_topology_index : int
The index of the TopologyBond in this Topology
Returns
-------
An openff.toolkit.topology.TopologyBond
"""
assert type(bond_topology_index) is int
assert 0 <= bond_topology_index < self.n_topology_bonds
this_molecule_start_index = 0
next_molecule_start_index = 0
for topology_molecule in self._topology_molecules:
next_molecule_start_index += topology_molecule.n_bonds
if next_molecule_start_index > bond_topology_index:
bond_molecule_index = bond_topology_index - this_molecule_start_index
return topology_molecule.bond(bond_molecule_index)
this_molecule_start_index += topology_molecule.n_bonds
def add_particle(self, particle):
"""Add a Particle to the Topology.
Parameters
----------
particle : Particle
The Particle to be added.
The Topology will take ownership of the Particle.
"""
pass
def add_molecule(self, molecule, local_topology_to_reference_index=None):
"""Add a Molecule to the Topology. You can optionally request that the atoms be added to the Topology in
a different order than they appear in the Molecule.
Parameters
----------
molecule : Molecule
The Molecule to be added.
local_topology_to_reference_index: dict, optional, default = None
Dictionary of {TopologyMolecule_atom_index : Molecule_atom_index} for the TopologyMolecule that will be
built. If None, this function will add the atoms to the Topology in the order that they appear in the
reference molecule.
Returns
-------
index : int
The index of this molecule in the topology
"""
from openff.toolkit.topology.molecule import FrozenMolecule, Molecule
if local_topology_to_reference_index is None:
local_topology_to_reference_index = dict(
(i, i) for i in range(molecule.n_atoms)
)
mol_smiles = molecule.to_smiles()
reference_molecule = None
for potential_ref_mol in self._reference_molecule_to_topology_molecules.keys():
if mol_smiles == potential_ref_mol.to_smiles():
# If the molecule is already in the Topology.reference_molecules, add another reference to it in
# Topology.molecules
reference_molecule = potential_ref_mol
# Graph-match this molecule to see if it's in the same order
# Default settings use full matching
_, atom_map = Molecule.are_isomorphic(
molecule, reference_molecule, return_atom_map=True
)
if atom_map is None:
                    raise Exception(
                        "Molecules match by SMILES but no atom map could be "
                        "generated between them."
                    )
new_mapping = {}
for local_top_idx, ref_idx in local_topology_to_reference_index.items():
new_mapping[local_top_idx] = atom_map[ref_idx]
local_topology_to_reference_index = new_mapping
break
if reference_molecule is None:
# If it's a new unique molecule, make and store an immutable copy of it
reference_molecule = FrozenMolecule(molecule)
self._reference_molecule_to_topology_molecules[reference_molecule] = list()
topology_molecule = TopologyMolecule(
reference_molecule, self, local_topology_to_reference_index
)
self._topology_molecules.append(topology_molecule)
self._reference_molecule_to_topology_molecules[reference_molecule].append(
self._topology_molecules[-1]
)
index = len(self._topology_molecules) - 1
return index
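    # Illustrative usage sketch (not part of the original source): building a Topology
    # molecule by molecule; identical molecules share a single reference molecule.
    # Assumes ``water`` is an OpenFF Molecule.
    #
    #     topology = Topology()
    #     for _ in range(10):
    #         topology.add_molecule(water)
    #     assert topology.n_reference_molecules == 1
    #     assert topology.n_topology_molecules == 10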
def add_constraint(self, iatom, jatom, distance=True):
"""
Mark a pair of atoms as constrained.
Constraints between atoms that are not bonded (e.g., rigid waters) are permissible.
Parameters
----------
iatom, jatom : Atom
Atoms to mark as constrained
These atoms may be bonded or not in the Topology
distance : simtk.unit.Quantity, optional, default=True
Constraint distance
``True`` if distance has yet to be determined
``False`` if constraint is to be removed
"""
# Check that constraint hasn't already been specified.
if (iatom, jatom) in self._constrained_atom_pairs:
existing_distance = self._constrained_atom_pairs[(iatom, jatom)]
if unit.is_quantity(existing_distance) and (distance is True):
raise Exception(
f"Atoms ({iatom},{jatom}) already constrained with distance {existing_distance} but attempting to override with unspecified distance"
)
if (existing_distance is True) and (distance is True):
raise Exception(
f"Atoms ({iatom},{jatom}) already constrained with unspecified distance but attempting to override with unspecified distance"
)
if distance is False:
del self._constrained_atom_pairs[(iatom, jatom)]
del self._constrained_atom_pairs[(jatom, iatom)]
return
self._constrained_atom_pairs[(iatom, jatom)] = distance
self._constrained_atom_pairs[(jatom, iatom)] = distance
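    # Illustrative usage sketch (not part of the original source): record a rigid-water
    # style constraint before the distance is known, then remove it again.
    #
    #     topology.add_constraint(0, 1)                    # distance not yet determined
    #     assert topology.is_constrained(0, 1) is True
    #     topology.add_constraint(0, 1, distance=False)    # drop the constraint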
def is_constrained(self, iatom, jatom):
"""
Check if a pair of atoms are marked as constrained.
Parameters
----------
iatom, jatom : int
Indices of atoms to mark as constrained.
Returns
-------
distance : simtk.unit.Quantity or bool
True if constrained but constraints have not yet been applied
Distance if constraint has already been added to System
"""
if (iatom, jatom) in self._constrained_atom_pairs:
return self._constrained_atom_pairs[(iatom, jatom)]
else:
return False
| {
"content_hash": "02a50f66d55d5e63f468c9ff2df6a885",
"timestamp": "",
"source": "github",
"line_count": 2973,
"max_line_length": 203,
"avg_line_length": 36.883619239825094,
"alnum_prop": 0.5848980894624048,
"repo_name": "open-forcefield-group/openforcefield",
"id": "6fa418aab99291d121e4c718732790000172eaaf",
"size": "109891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openff/toolkit/topology/topology.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "81"
},
{
"name": "Jupyter Notebook",
"bytes": "1974319"
},
{
"name": "Python",
"bytes": "289219"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
} |
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Entry.pingback_enabled'
db.add_column('zinnia_entry', 'pingback_enabled', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True))
def backwards(self, orm):
# Deleting field 'Entry.pingback_enabled'
db.delete_column('zinnia_entry', 'pingback_enabled')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.entry': {
'Meta': {'object_name': 'Entry'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['zinnia.Category']", 'symmetrical': 'False'}),
'comment_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2042, 3, 15, 0, 0)'}),
'excerpt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'pingback_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_rel_+'", 'null': 'True', 'to': "orm['zinnia.Entry']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['zinnia']
| {
"content_hash": "3ecd6a793b2eb34870e583ed2c420a32",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 178,
"avg_line_length": 71.54444444444445,
"alnum_prop": 0.5544339183102966,
"repo_name": "westinedu/newertrends",
"id": "91fd15c108c1144f1e03a0accf4bd638ea55fa3a",
"size": "6439",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zinnia/migrations/0002_auto__add_field_entry_pingback_enabled.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "450683"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "5511333"
},
{
"name": "Ruby",
"bytes": "249"
},
{
"name": "Shell",
"bytes": "1355"
}
],
"symlink_target": ""
} |
"""Constants for the sia integration."""
from homeassistant.const import Platform
PLATFORMS = [Platform.ALARM_CONTROL_PANEL, Platform.BINARY_SENSOR]
DOMAIN = "sia"
ATTR_CODE = "last_code"
ATTR_ZONE = "last_zone"
ATTR_MESSAGE = "last_message"
ATTR_ID = "last_id"
ATTR_TIMESTAMP = "last_timestamp"
TITLE = "SIA Alarm on port {}"
CONF_ACCOUNT = "account"
CONF_ACCOUNTS = "accounts"
CONF_ADDITIONAL_ACCOUNTS = "additional_account"
CONF_ENCRYPTION_KEY = "encryption_key"
CONF_IGNORE_TIMESTAMPS = "ignore_timestamps"
CONF_PING_INTERVAL = "ping_interval"
CONF_ZONES = "zones"
SIA_NAME_FORMAT = "{} - {} - zone {} - {}"
SIA_UNIQUE_ID_FORMAT_ALARM = "{}_{}_{}"
SIA_UNIQUE_ID_FORMAT_BINARY = "{}_{}_{}_{}"
SIA_HUB_ZONE = 0
SIA_EVENT = "sia_event_{}_{}"
| {
"content_hash": "7f5a80b1e5656791e6e1eb3b4c753f50",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 66,
"avg_line_length": 26.75,
"alnum_prop": 0.684913217623498,
"repo_name": "home-assistant/home-assistant",
"id": "183b3422f784d007faf357b0e13f1a148f97741b",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sia/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
import hashlib
import hmac
import time
from six.moves.urllib import parse as urlparse
from tempest_lib.common.utils import data_utils
from tempest.api.object_storage import base
from tempest import test
class ObjectFormPostTest(base.BaseObjectTest):
metadata = {}
containers = []
@classmethod
def resource_setup(cls):
super(ObjectFormPostTest, cls).resource_setup()
cls.container_name = data_utils.rand_name(name='TestContainer')
cls.object_name = data_utils.rand_name(name='ObjectTemp')
cls.container_client.create_container(cls.container_name)
cls.containers = [cls.container_name]
cls.key = 'Meta'
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_account_metadata(metadata=cls.metadata)
def setUp(self):
super(ObjectFormPostTest, self).setUp()
# make sure the metadata has been set
account_client_metadata, _ = \
self.account_client.list_account_metadata()
self.assertIn('x-account-meta-temp-url-key',
account_client_metadata)
self.assertEqual(
account_client_metadata['x-account-meta-temp-url-key'],
self.key)
@classmethod
def resource_cleanup(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
cls.delete_containers(cls.containers)
super(ObjectFormPostTest, cls).resource_cleanup()
def get_multipart_form(self, expires=600):
path = "%s/%s/%s" % (
urlparse.urlparse(self.container_client.base_url).path,
self.container_name,
self.object_name)
redirect = ''
max_file_size = 104857600
max_file_count = 10
expires += int(time.time())
hmac_body = '%s\n%s\n%s\n%s\n%s' % (path,
redirect,
max_file_size,
max_file_count,
expires)
signature = hmac.new(self.key, hmac_body, hashlib.sha1).hexdigest()
fields = {'redirect': redirect,
'max_file_size': str(max_file_size),
'max_file_count': str(max_file_count),
'expires': str(expires),
'signature': signature}
boundary = '--boundary--'
data = []
for (key, value) in fields.items():
data.append('--' + boundary)
data.append('Content-Disposition: form-data; name="%s"' % key)
data.append('')
data.append(value)
data.append('--' + boundary)
data.append('Content-Disposition: form-data; '
'name="file1"; filename="testfile"')
data.append('Content-Type: application/octet-stream')
data.append('')
data.append('hello world')
data.append('--' + boundary + '--')
data.append('')
body = '\r\n'.join(data)
content_type = 'multipart/form-data; boundary=%s' % boundary
return body, content_type
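    # Illustrative sketch (not part of the original test): the FormPost signature built
    # above is an HMAC-SHA1 over the newline-joined path, redirect, max file size, max
    # file count and expiry, keyed with the account's Temp-URL-Key. Values are made up.
    #
    #     hmac_body = '\n'.join(['/v1/AUTH_test/cont/obj', '', '104857600', '10', '1600000000'])
    #     signature = hmac.new('Meta', hmac_body, hashlib.sha1).hexdigest()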
@test.idempotent_id('80fac02b-6e54-4f7b-be0d-a965b5cbef76')
@test.requires_ext(extension='formpost', service='object')
def test_post_object_using_form(self):
body, content_type = self.get_multipart_form()
headers = {'Content-Type': content_type,
'Content-Length': str(len(body))}
url = "%s/%s" % (self.container_name, self.object_name)
resp, body = self.object_client.post(url, body, headers=headers)
self.assertHeaders(resp, "Object", "POST")
# Ensure object is available
resp, body = self.object_client.get("%s/%s%s" % (
self.container_name, self.object_name, "testfile"))
self.assertHeaders(resp, "Object", "GET")
self.assertEqual(body, "hello world")
| {
"content_hash": "b497dd04df77a526f473dbec75ba1f3d",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 35.0990990990991,
"alnum_prop": 0.574435318275154,
"repo_name": "yamt/tempest",
"id": "ce587d74eceda2530dbc08f0fc419cf82c1fe4ba",
"size": "4503",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/object_storage/test_object_formpost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2739641"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
"""
Module containing a preprocessor that removes cells if they match
one or more regular expression.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Set, Unicode
from . import ClearOutputPreprocessor
class TagRemovePreprocessor(ClearOutputPreprocessor):
"""
Removes cells from a notebook that have tags that designate they are to be
removed prior to exporting the notebook.
Traitlets:
----------
    remove_cell_tags: removes cells tagged with these values
    remove_all_outputs_tags: removes entire output areas on cells
                             tagged with these values
    remove_single_output_tags: removes individual output objects on
                               outputs tagged with these values
    remove_input_tags: removes the input areas of cells
                       tagged with these values
"""
remove_cell_tags = Set(Unicode, default_value=[],
help=("Tags indicating which cells are to be removed,"
"matches tags in `cell.metadata.tags`.")).tag(config=True)
remove_all_outputs_tags = Set(Unicode, default_value=[],
help=("Tags indicating cells for which the outputs are to be removed,"
"matches tags in `cell.metadata.tags`.")).tag(config=True)
remove_single_output_tags = Set(Unicode, default_value=[],
help=("Tags indicating which individual outputs are to be removed,"
"matches output *i* tags in `cell.outputs[i].metadata.tags`.")
).tag(config=True)
remove_input_tags = Set(Unicode, default_value=[],
help=("Tags indicating cells for which input is to be removed,"
"matches tags in `cell.metadata.tags`.")).tag(config=True)
def check_cell_conditions(self, cell, resources, index):
"""
Checks that a cell has a tag that is to be removed
Returns: Boolean.
True means cell should *not* be removed.
"""
# Return true if any of the tags in the cell are removable.
return not self.remove_cell_tags.intersection(
cell.get('metadata', {}).get('tags', []))
def preprocess(self, nb, resources):
"""
Preprocessing to apply to each notebook. See base.py for details.
"""
# Skip preprocessing if the list of patterns is empty
if not any([self.remove_cell_tags,
self.remove_all_outputs_tags,
self.remove_single_output_tags,
self.remove_input_tags
]):
return nb, resources
# Filter out cells that meet the conditions
nb.cells = [self.preprocess_cell(cell, resources, index)[0]
for index, cell in enumerate(nb.cells)
if self.check_cell_conditions(cell, resources, index)]
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each cell. See base.py for details.
"""
if (self.remove_all_outputs_tags.intersection(
cell.get('metadata', {}).get('tags', []))
and cell.cell_type == 'code'):
cell.outputs = []
cell.execution_count = None
# Remove metadata associated with output
if 'metadata' in cell:
for field in self.remove_metadata_fields:
cell.metadata.pop(field, None)
if (self.remove_input_tags.intersection(
cell.get('metadata', {}).get('tags', []))):
cell.transient = {
'remove_source': True
}
if cell.get('outputs', []):
cell.outputs = [output
for output_index, output in enumerate(cell.outputs)
if self.check_output_conditions(output,
resources,
cell_index,
output_index)
]
return cell, resources
def check_output_conditions(self, output, resources,
cell_index, output_index):
"""
        Checks whether an output should be kept, i.e. whether none of its tags mark it for removal.
Returns: Boolean.
True means output should *not* be removed.
"""
return not self.remove_single_output_tags.intersection(
output.get('metadata', {}).get('tags', []))
| {
"content_hash": "ed556e4a478af24456bbce0cf56a3268",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 82,
"avg_line_length": 39.426086956521736,
"alnum_prop": 0.559108954565505,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "99869253cc398b7bc6d1e0b9abe24ebb7ae0c311",
"size": "4534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/nbconvert/preprocessors/tagremove.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from .base import OccurrenceDataTestCase
from ..models import Occurrence, Notification
from ..tasks import generate_occurrences, send_occurrence_notifications
class OccurrenceAppTestCase(OccurrenceDataTestCase):
"Integration test for larger SMS workflow."
def setUp(self):
super(OccurrenceAppTestCase, self).setUp()
self.timeline = self.create_timeline(name='Test', slug='foo')
self.milestone = self.create_milestone(timeline=self.timeline, offset=1)
self.connection = self.lookup_connections('5555555555')[0]
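    # Command grammar exercised by these tests (inferred from the messages
    # below, not from the application code):
    #   APPT NEW <timeline> <pin>      -- subscribe to a timeline
    #   APPT CONFIRM <timeline> <pin>  -- confirm an upcoming occurrence
    #   APPT STATUS <timeline> <pin> <ACHIEVED|MISSED>
    #   APPT QUIT <timeline> <pin>     -- unsubscribe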
def test_join(self):
"Join timeline then generate upcoming occurrences."
msg = self.receive('APPT NEW FOO 123', self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
# Single occurrence should be created
generate_occurrences()
occurrence = Occurrence.objects.get(subscription__connection=self.connection, milestone=self.milestone)
tomorrow = datetime.date.today() + datetime.timedelta(days=self.milestone.offset)
self.assertEqual(tomorrow, occurrence.date)
def test_confirm_occurrence(self):
"Generate a notification and confirm an occurrence."
subscription = self.create_timeline_subscription(connection=self.connection, timeline=self.timeline)
generate_occurrences()
send_occurrence_notifications()
reminder = self.outbound.pop()
self.assertTrue(reminder.text.startswith('This is a reminder'))
msg = self.receive('APPT CONFIRM FOO {0}'.format(subscription.pin), self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
occurrence = Occurrence.objects.get(subscription__connection=self.connection, milestone=self.milestone)
self.assertTrue(occurrence.completed)
def test_made_occurrence(self):
"Mark an occurrence as seen."
yesterday = datetime.date.today() - datetime.timedelta(days=self.milestone.offset)
# Joined yesterday so occurrence would be today
subscription = self.create_timeline_subscription(
connection=self.connection, timeline=self.timeline, start=yesterday)
generate_occurrences()
send_occurrence_notifications()
reminder = self.outbound.pop()
self.assertTrue(reminder.text.startswith('This is a reminder'))
msg = self.receive('APPT STATUS FOO {0} ACHIEVED'.format(subscription.pin), self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
occurrence = Occurrence.objects.get(subscription__connection=self.connection, milestone=self.milestone)
self.assertEqual(Occurrence.STATUS_ACHIEVED, occurrence.status)
def test_missed_occurrence(self):
"Mark an occurrence as missed."
yesterday = datetime.date.today() - datetime.timedelta(days=self.milestone.offset)
# Joined yesterday so occurrence would be today
subscription = self.create_timeline_subscription(
connection=self.connection, timeline=self.timeline, start=yesterday)
generate_occurrences()
send_occurrence_notifications()
reminder = self.outbound.pop()
self.assertTrue(reminder.text.startswith('This is a reminder'))
msg = self.receive('APPT STATUS FOO {0} MISSED'.format(subscription.pin), self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
occurrence = Occurrence.objects.get(subscription__connection=self.connection, milestone=self.milestone)
self.assertEqual(Occurrence.STATUS_MISSED, occurrence.status)
def test_join_then_quit(self):
"Join a timeline then quit."
msg = self.receive('APPT NEW FOO 123', self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
msg = self.receive('APPT QUIT FOO 123', self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
generate_occurrences()
# No occurrences should be generated
occurrences = Occurrence.objects.filter(subscription__connection=self.connection)
self.assertEqual(0, occurrences.count())
def test_quit_reminders(self):
"Don't send reminders for unsubscribed users."
msg = self.receive('APPT NEW FOO 123', self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
generate_occurrences()
msg = self.receive('APPT QUIT FOO 123', self.connection)
reply = self.outbound.pop()
self.assertTrue(reply.text.startswith('Thank you'))
send_occurrence_notifications()
self.assertEqual(0, len(self.outbound), self.outbound)
occurrence = Occurrence.objects.get(subscription__connection=self.connection, milestone=self.milestone)
notifications = Notification.objects.filter(occurrence=occurrence)
self.assertEqual(0, notifications.count())
| {
"content_hash": "d6cfff6b72a73ed2574a301bfc4437fa",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 111,
"avg_line_length": 51.16831683168317,
"alnum_prop": 0.6989164086687306,
"repo_name": "ewheeler/rapidsms-timelines",
"id": "a5ee9524e58910a47fea27712d3ff911458c1f5d",
"size": "5168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timelines/tests/test_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17771"
},
{
"name": "JavaScript",
"bytes": "437753"
},
{
"name": "Python",
"bytes": "186286"
},
{
"name": "Shell",
"bytes": "5126"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="ticktextsrc", parent_name="indicator.gauge.axis", **kwargs
):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "df1bb121dd2f966a919cb5d086b55705",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.6037735849056604,
"repo_name": "plotly/python-api",
"id": "f690812d0b4f52d3058243b8e8df2cc4ab5de3c5",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/indicator/gauge/axis/_ticktextsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:24776")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:24776")
cmd = sys.argv[1].lower()
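# Example invocation (illustrative): "python bitrpc.py getinfo" or
# "python bitrpc.py sendtoaddress"; the first argument selects one of the RPC
# wrappers dispatched below, and any further input is prompted for interactively.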
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Speedcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
		addr = raw_input("Enter a Speedcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
		except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| {
"content_hash": "c01be7e67a8596132c082816c4e53976",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.19753086419753,
"alnum_prop": 0.6618622448979592,
"repo_name": "studio666/Speedcoin",
"id": "7b7ec9e08543241c19790e72fa11da84dceebecb",
"size": "7840",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32391"
},
{
"name": "C++",
"bytes": "2613625"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18420"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "13723"
},
{
"name": "NSIS",
"bytes": "5946"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69723"
},
{
"name": "QMake",
"bytes": "15264"
},
{
"name": "Shell",
"bytes": "13234"
}
],
"symlink_target": ""
} |
"""
:mod:`trove` -- Cloud PaaS Database Platform
============================================
.. automodule:: trove
:platform: Unix
:synopsis: Platform-As-A-Service Database Cloud
.. moduleauthor:: Michael Basnight <mbasnight@gmail.com>
"""
| {
"content_hash": "e958008efe9f50e6c4cf2a817d263b6a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.5983263598326359,
"repo_name": "citrix-openstack/build-trove",
"id": "b3bc3ec742d7f5a6a75187d7faa60f1bed33b428",
"size": "919",
"binary": false,
"copies": "3",
"ref": "refs/heads/ctx-nova-network-smoke-latest",
"path": "trove/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19900"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1725275"
},
{
"name": "Shell",
"bytes": "5512"
}
],
"symlink_target": ""
} |
import ShareYourSystem as SYS
#Define a Pymongoer
MyPymongoer=SYS.PymongoerClass(
).pymongo(
**{
'FolderingPathVariable':SYS.Pymongoer.LocalFolderPathStr
}
)
#remove
MyPymongoer.PymongoneClientVariable['MyDatabase']
MyPymongoer.PymongoneClientVariable.MyDatabase.ThingsCollection.remove({})
MyPymongoer.PymongoneClientVariable.MyDatabase.ThingsCollection.insert({'MyStr':'hello'})
#Print the AttestedStr
SYS._print('MyPymongoer is '+SYS._str(MyPymongoer)+'\n')
#print
print('ThingsCollection fetch gives')
SYS._print(
MyPymongoer.pymongoview('MyDatabase')
)
#close
MyPymongoer.process(_ActionStr='kill')
| {
"content_hash": "1363367c9a0ec071ddb0ecba5d324b75",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 89,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.792063492063492,
"repo_name": "Ledoux/ShareYourSystem",
"id": "5c997dc9cf3951469970345e553652c3b70a6db1",
"size": "646",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Interfacers/Pymongoer/01_ExampleDoc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
from typing import Any
from zerver.lib.test_classes import WebhookTestCase
class BeeminderHookTests(WebhookTestCase):
STREAM_NAME = 'beeminder'
URL_TEMPLATE = u"/api/v1/external/beeminder?api_key={api_key}&stream={stream}"
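    # Illustrative resolved URL (hypothetical key and stream values):
    #   /api/v1/external/beeminder?api_key=abcd1234&stream=beeminder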
@patch('zerver.webhooks.beeminder.view.time.time')
def test_beeminder_derail(self, time: Any) -> None:
time.return_value = 1517739100 # 5.6 hours from fixture value
expected_topic = u"beekeeper"
expected_message = '\n'.join([
'You are going to derail from goal **gainweight** in **{:0.1f} hours**'.format(5.6),
' You need **+2 in 7 days (60)** to avoid derailing',
' * Pledge: **0$** :relieved:'
])
self.send_and_test_stream_message('derail',
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded")
@patch('zerver.webhooks.beeminder.view.time.time')
def test_beeminder_derail_worried(self, time: Any) -> None:
time.return_value = 1517739100 # 5.6 hours from fixture value
expected_topic = u"beekeeper"
expected_message = '\n'.join([
'You are going to derail from goal **gainweight** in **{:0.1f} hours**'.format(5.6),
' You need **+2 in 7 days (60)** to avoid derailing',
' * Pledge: **5$** :worried:'
])
self.send_and_test_stream_message('derail_worried',
expected_topic,
expected_message,
content_type="application/json")
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("beeminder", fixture_name, file_type="json")
| {
"content_hash": "108f2e80052a33b3e44d58b448af0b6f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 96,
"avg_line_length": 48.15384615384615,
"alnum_prop": 0.5548455804046858,
"repo_name": "dhcrzf/zulip",
"id": "5272cb9a49b08530eb97362f2fd59f703966c748",
"size": "1902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/webhooks/beeminder/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436713"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "673974"
},
{
"name": "JavaScript",
"bytes": "2951950"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "72908"
},
{
"name": "Python",
"bytes": "6188005"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "118284"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
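# Example (illustrative): eprint("herocomp: compilation failed") behaves like
# print() but writes to stderr, keeping diagnostics out of stdout.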
| {
"content_hash": "d491f2e56f2cd7991823fcda8036c6cf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 17.857142857142858,
"alnum_prop": 0.68,
"repo_name": "wilima/herocomp",
"id": "18075f9427c8badb507180e5a8fdc8564ca67e8f",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "herocomp/tools/ErrorOutput.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "8512"
},
{
"name": "Assembly",
"bytes": "228671"
},
{
"name": "C",
"bytes": "732"
},
{
"name": "Python",
"bytes": "239452"
},
{
"name": "Shell",
"bytes": "453"
}
],
"symlink_target": ""
} |
import logging
from sqlalchemy.engine import Engine
from fedlearner_webconsole.composer.models import OptimisticLock
from fedlearner_webconsole.db import get_session
class OpLocker(object):
def __init__(self, name: str, db_engine: Engine):
"""Optimistic Lock
Args:
            name: lock name; should be unique within the same thread
"""
self._name = name
self._version = 0
self._has_lock = False
self.db_engine = db_engine
@property
def name(self) -> str:
return self._name
@property
def version(self) -> int:
return self._version
def try_lock(self) -> 'OpLocker':
with get_session(self.db_engine) as session:
try:
lock = session.query(OptimisticLock).filter_by(
name=self._name).first()
if lock:
self._has_lock = True
self._version = lock.version
return self
new_lock = OptimisticLock(name=self._name,
version=self._version)
session.add(new_lock)
session.commit()
self._has_lock = True
return self
except Exception as e: # pylint: disable=broad-except
logging.error(f'failed to require lock, exception: {e}')
return self
def is_latest_version(self) -> bool:
if not self._has_lock:
return False
with get_session(self.db_engine) as session:
try:
new_lock = session.query(OptimisticLock).filter_by(
name=self._name).first()
if not new_lock:
return False
logging.info(f'[op_locker] version, current: {self._version}, '
f'new: {new_lock.version}')
return self._version == new_lock.version
except Exception as e: # pylint: disable=broad-except
logging.error(
f'failed to check lock is conflict, exception: {e}')
return False
def update_version(self) -> bool:
# double check
if not self.is_latest_version():
return False
with get_session(self.db_engine) as session:
try:
lock = session.query(OptimisticLock).filter_by(
name=self._name).first()
lock.version = self._version + 1
session.commit()
return True
except Exception as e: # pylint: disable=broad-except
logging.error(f'failed to update lock version, exception: {e}')
return False
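# Illustrative usage sketch (hypothetical names, not part of this module):
#   lock = OpLocker('workflow_scheduler', db_engine).try_lock()
#   if lock.is_latest_version() and lock.update_version():
#       pass  # safe to proceed; a concurrent writer would have bumped the version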
| {
"content_hash": "1e00ca9f6f2a02ab1663ee9d0ef22546",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 34.4125,
"alnum_prop": 0.5219760261532873,
"repo_name": "bytedance/fedlearner",
"id": "8b0bdd40437963ae08e22625e24dbd3c04c9a22a",
"size": "3379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_console_v2/api/fedlearner_webconsole/composer/op_locker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "25817"
},
{
"name": "CSS",
"bytes": "7766"
},
{
"name": "Dockerfile",
"bytes": "6341"
},
{
"name": "Go",
"bytes": "163506"
},
{
"name": "HTML",
"bytes": "3527"
},
{
"name": "JavaScript",
"bytes": "482972"
},
{
"name": "Less",
"bytes": "14981"
},
{
"name": "Lua",
"bytes": "8088"
},
{
"name": "Makefile",
"bytes": "2869"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Mustache",
"bytes": "35891"
},
{
"name": "Python",
"bytes": "2412335"
},
{
"name": "Shell",
"bytes": "118210"
},
{
"name": "TypeScript",
"bytes": "805827"
}
],
"symlink_target": ""
} |
import nltk
nltk.download() | {
"content_hash": "af71c921f858798e49f5fb14cb4980ec",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 15,
"avg_line_length": 13.5,
"alnum_prop": 0.8148148148148148,
"repo_name": "b09dan/universities_sentiment",
"id": "3ff3043ee5a059f9b94ac7303173b56346b00c7f",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text_class/demo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1364363377"
},
{
"name": "Jupyter Notebook",
"bytes": "1841442"
},
{
"name": "Python",
"bytes": "51191"
}
],
"symlink_target": ""
} |
import os
import subprocess
import sys
from watchman.integration.lib import WatchmanInstance
from watchman.integration.lib import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestWatchmanWait(WatchmanTestCase.WatchmanTestCase):
def requiresPersistentSession(self) -> bool:
return True
def spawnWatchmanWait(self, cmdArgs):
wait_script = os.environ.get("WATCHMAN_WAIT_PATH")
if wait_script:
args = [wait_script]
else:
args = [
sys.executable,
os.path.join(os.environ["WATCHMAN_PYTHON_BIN"], "watchman-wait"),
]
args.extend(cmdArgs)
env = os.environ.copy()
sock_path = WatchmanInstance.getSharedInstance().getSockPath()
env["WATCHMAN_SOCK"] = sock_path.legacy_sockpath()
pywatchman_path = env.get("PYWATCHMAN_PATH")
if pywatchman_path:
env["PYTHONPATH"] = pywatchman_path
return subprocess.Popen(
args, env=env, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
def assertWaitedFileList(self, stdout, expected) -> None:
stdout = stdout.decode("utf-8").rstrip()
files = [f.rstrip() for f in stdout.split("\n")]
self.assertFileListContains(files, expected)
def assertWaitForWmWaitWatch(self, root) -> None:
"""Wait for the specified root to appear in the watch list;
watchman-wait will initiate that asynchronously and we have
to wait for that before proceeding.
Then wait for the watch to be ready to query, otherwise the
test expectations will not be reliably met."""
# wait for the watch to appear
self.assertWaitFor(
lambda: self.rootIsWatched(root),
message="%s was not watched by watchman-wait" % root,
)
# now wait for it to be ready to query. The easiest way
# to do this is to ask for the watch ourselves, as that
# will block us until it is ready
self.watchmanCommand("watch", root)
def test_wait(self) -> None:
root = self.mkdtemp()
self.touchRelative(root, "foo")
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
self.touchRelative(a_dir, "foo")
wmwait = self.spawnWatchmanWait(
["--relative", root, "--max-events", "8", "-t", "3", root]
)
self.assertWaitForWmWaitWatch(root)
self.touchRelative(root, "bar")
self.removeRelative(root, "foo")
self.touchRelative(a_dir, "bar")
self.removeRelative(a_dir, "foo")
b_dir = os.path.join(root, "b")
os.mkdir(b_dir)
self.touchRelative(b_dir, "foo")
(stdout, stderr) = wmwait.communicate()
self.assertWaitedFileList(stdout, ["a/bar", "a/foo", "b/foo", "bar", "foo"])
def test_rel_root(self) -> None:
root = self.mkdtemp()
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
b_dir = os.path.join(root, "b")
os.mkdir(b_dir)
wmwait = self.spawnWatchmanWait(
["--relative", b_dir, "--max-events", "8", "-t", "6", a_dir, b_dir]
)
self.assertWaitForWmWaitWatch(b_dir)
self.assertWaitForWmWaitWatch(a_dir)
self.touchRelative(a_dir, "afoo")
self.touchRelative(b_dir, "bfoo")
a_sub_dir = os.path.join(a_dir, "asub")
os.mkdir(a_sub_dir)
b_sub_dir = os.path.join(b_dir, "bsub")
os.mkdir(b_sub_dir)
(stdout, stderr) = wmwait.communicate()
self.assertWaitedFileList(stdout, ["../a/afoo", "../a/asub", "bfoo", "bsub"])
| {
"content_hash": "ca0c8cb9859bbc6708e3c5e3e5d5d363",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 85,
"avg_line_length": 34.35849056603774,
"alnum_prop": 0.599121361889072,
"repo_name": "nodakai/watchman",
"id": "9cac023edd8a8231906a54ad0ff12c0dd521a695",
"size": "3823",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "watchman/integration/test_wm_wait.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70350"
},
{
"name": "C++",
"bytes": "1042071"
},
{
"name": "CMake",
"bytes": "84403"
},
{
"name": "CSS",
"bytes": "16964"
},
{
"name": "HTML",
"bytes": "36889"
},
{
"name": "Java",
"bytes": "165177"
},
{
"name": "JavaScript",
"bytes": "35299"
},
{
"name": "Python",
"bytes": "853620"
},
{
"name": "Ruby",
"bytes": "23525"
},
{
"name": "Rust",
"bytes": "175867"
},
{
"name": "SCSS",
"bytes": "25549"
},
{
"name": "Shell",
"bytes": "11104"
},
{
"name": "Starlark",
"bytes": "1317"
},
{
"name": "Thrift",
"bytes": "40071"
}
],
"symlink_target": ""
} |
import os
import os.path
import re
import time
import jinja2
import robot.utils
import cumulusci
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import BaseTask
from cumulusci.core.utils import process_glob_list_arg
from cumulusci.robotframework import PageObjects
from robot.libdocpkg.builder import DocumentationBuilder
from robot.libraries.BuiltIn import RobotNotRunningError
from robot.libdocpkg.robotbuilder import LibraryDocBuilder
from robot.utils import Importer
class RobotLibDoc(BaseTask):
task_options = {
"path": {
"description": (
"The path to one or more keyword libraries to be documented. "
"The path can be single a python file, a .robot file, a python "
"module (eg: cumulusci.robotframework.Salesforce) or a comma "
"separated list of any of those. Glob patterns are supported "
"for filenames (eg: ``robot/SAL/doc/*PageObject.py``). The order "
"of the files will be preserved in the generated documentation. "
"The result of pattern expansion will be sorted"
),
"required": True,
},
"output": {
"description": "The output file where the documentation will be written",
"required": True,
},
"title": {
"description": "A string to use as the title of the generated output",
"required": False,
},
}
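    # Illustrative cumulusci.yml configuration (hypothetical project paths, not
    # taken from this file):
    #   tasks:
    #     robot_libdoc:
    #       options:
    #         path: robot/MyProject/resources/*.py,robot/MyProject/doc/*PageObject.py
    #         output: docs/Keywords.html
    #         title: MyProject Keyword Documentation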
def _validate_options(self):
super(RobotLibDoc, self)._validate_options()
self.options["path"] = process_glob_list_arg(self.options["path"])
# Attempt to collect all files that don't match existing
# files. Note: "path" could be a library module path (for example,
# cumulusci.robotframework.CumulusCI) so we only do this check for
# files that end in known library suffixes (.py, .robot, .resource).
bad_files = []
for path in self.options["path"]:
name, extension = os.path.splitext(path)
if extension in (".py", ".robot", ".resource") and not os.path.exists(path):
bad_files.append(path)
if bad_files:
if len(bad_files) == 1:
error_message = "Unable to find the input file '{}'".format(
bad_files[0]
)
else:
files = ", ".join(["'{}'".format(filename) for filename in bad_files])
error_message = "Unable to find the following input files: {}".format(
files
)
raise TaskOptionsError(error_message)
def is_pageobject_library(self, path):
"""Return True if the file looks like a page object library"""
if path.endswith(".py"):
with open(path, "r") as f:
data = f.read()
if re.search(r"@pageobject\(", data):
return True
return False
def _run_task(self):
kwfiles = []
processed_files = []
for library_name in self.options["path"]:
kwfile = KeywordFile(library_name)
try:
if self.is_pageobject_library(library_name):
PageObjects._reset()
module = Importer().import_class_or_module_by_path(
os.path.abspath(library_name)
)
kwfile.doc = module.__doc__
if hasattr(module, "TITLE"):
kwfile.title = module.TITLE
for pobj_name, pobj in sorted(PageObjects.registry.items()):
pobj = PageObjects.registry[pobj_name]
libname = "{}.{}".format(pobj.__module__, pobj.__name__)
libdoc = LibraryDocBuilder().build(libname)
libdoc.src = os.path.basename(library_name)
libdoc.pobj = libname
kwfile.add_keywords(libdoc, pobj_name)
else:
libdoc = DocumentationBuilder(library_name).build(library_name)
kwfile.add_keywords(libdoc)
# if we get here, we were able to process the file correctly
kwfiles.append(kwfile)
processed_files.append(library_name)
except RobotNotRunningError as e:
# oddly, robot's exception has a traceback embedded in the message, so we'll
# only print out the first line to hide most of the noise
self.logger.warn("unexpected error: {}".format(str(e).split("\n")[0]))
try:
with open(self.options["output"], "w") as f:
html = self._render_html(kwfiles)
f.write(html)
self.logger.info("created {}".format(f.name))
except Exception as e:
raise TaskOptionsError(
"Unable to create output file '{}' ({})".format(
self.options["output"], e.strerror
)
)
return {"files": processed_files, "html": html}
def _render_html(self, libraries):
"""Generate the html. `libraries` is a list of LibraryDocumentation objects"""
title = self.options.get("title", "Keyword Documentation")
date = time.strftime("%A %B %d, %I:%M %p")
cci_version = cumulusci.__version__
stylesheet_path = os.path.join(os.path.dirname(__file__), "stylesheet.css")
with open(stylesheet_path) as f:
stylesheet = f.read()
jinjaenv = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=False
)
jinjaenv.filters["robot_html"] = robot.utils.html_format
template = jinjaenv.get_template("template.html")
return template.render(
libraries=libraries,
title=title,
cci_version=cci_version,
stylesheet=stylesheet,
date=date,
)
class KeywordFile:
"""Helper class which represents a file and its keywords
A file may have just a bunch of keywords, or groups of
keywords organized as page objects. Each group of keywords
is stored in self.keywords, with the page object metadata
as a key.
For normal libraries, the key is an empty tuple.
"""
def __init__(self, path):
if os.path.exists(path):
self.filename = os.path.basename(path)
else:
# if it's not a file, it must be a module
self.filename = path.split(".")[-1]
self.title = self.filename
self.path = path
self.keywords = {}
def add_keywords(self, libdoc, page_object=tuple()):
self.keywords[page_object] = libdoc
| {
"content_hash": "da82e77096f837cdb6a3e8da756d2016",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 92,
"avg_line_length": 38.92045454545455,
"alnum_prop": 0.5651094890510949,
"repo_name": "SalesforceFoundation/CumulusCI",
"id": "58d94fdf12110fa2a8ae773fd15cb522d8ce94de",
"size": "6850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/tasks/robotframework/libdoc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "754354"
},
{
"name": "RobotFramework",
"bytes": "9330"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
} |
"""
Django settings for recloud project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h9w5qat*mdxuwjrn+z3=$s3c_f1_aDelka_Ruzeka_JeRemy_Jan_Zdenek_ZDenek2_EVA_mcz+@#u%^z24jdkdc5b!=$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
TEMPLATE_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, '..', 'templates')),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'recloud',
'dashboard',
'userprofile',
'servers',
'help',
'security',
'logs',
'news',
'invoices',
'store',
'storage',
    # temporary views used during the development phase
'devel',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'recloud.urls'
WSGI_APPLICATION = 'recloud.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'recloud',
'USER': 'root',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TEMPLATE_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, 'templates')),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Prague'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..', 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, 'static')),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "185d0983759f14d1c7e43e9393751109",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 109,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6449511400651465,
"repo_name": "jparicka/recloud",
"id": "2fe7ec3993a67d723861dfda23168be0c201ac97",
"size": "2788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recloud/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1938"
},
{
"name": "Python",
"bytes": "55616"
},
{
"name": "Shell",
"bytes": "195"
}
],
"symlink_target": ""
} |
import numpy as np
import unittest
from pylearn2.testing.skip import skip_if_no_data
import pylearn2.datasets.icml07 as icml07
# Basic tests to see if data is loadable
def test_MNIST_rot_back():
skip_if_no_data()
data = icml07.MNIST_rotated_background(which_set='train')
data = icml07.MNIST_rotated_background(which_set='valid')
data = icml07.MNIST_rotated_background(which_set='test')
def test_Convex():
skip_if_no_data()
data = icml07.Convex(which_set='train')
data = icml07.Convex(which_set='valid')
data = icml07.Convex(which_set='test')
def test_Rectangles():
skip_if_no_data()
data = icml07.Rectangles(which_set='train')
data = icml07.Rectangles(which_set='valid')
data = icml07.Rectangles(which_set='test')
def test_RectanglesImage():
skip_if_no_data()
data = icml07.RectanglesImage(which_set='train')
data = icml07.RectanglesImage(which_set='valid')
data = icml07.RectanglesImage(which_set='test')
# Test features
def test_split():
skip_if_no_data()
n_train=100
n_valid=200
n_test=300
data = icml07.MNIST_rotated_background(which_set='train', split=(n_train, n_valid, n_test))
assert data.X.shape[0] == n_train, "Unexpected size of train set"
assert data.y.shape[0] == n_train, "Unexpected size of train set"
data = icml07.MNIST_rotated_background(which_set='valid', split=(n_train, n_valid, n_test))
assert data.X.shape[0] == n_valid, "Unexpected size of validation set"
assert data.y.shape[0] == n_valid, "Unexpected size of validation set"
data = icml07.MNIST_rotated_background(which_set='test', split=(n_train, n_valid, n_test))
assert data.X.shape[0] == n_test, "Unexpected size of test set"
assert data.y.shape[0] == n_test, "Unexpected size of test set"
def test_one_hot():
skip_if_no_data()
data = icml07.MNIST_rotated_background(which_set='train', one_hot=True, split=(100,100,100))
    assert data.y.shape[1] == 10 # MNIST has 10 classes
data = icml07.Rectangles(which_set='train', one_hot=True, split=(100,100,100))
assert data.y.shape[1] == 2 # Two classes
| {
"content_hash": "7b2ff38ed9f17a04d1a18b4703d201fb",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 96,
"avg_line_length": 36.067796610169495,
"alnum_prop": 0.6828007518796992,
"repo_name": "KennethPierce/pylearnk",
"id": "6fbd1ad10d941c458ccabcf3c74cce38caf26987",
"size": "2128",
"binary": false,
"copies": "1",
"ref": "refs/heads/fixNogil/master",
"path": "pylearn2/datasets/tests/test_icml07.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "53316"
},
{
"name": "C++",
"bytes": "46935"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1266727"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3410626"
},
{
"name": "Shell",
"bytes": "4195"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FinancialAidReviewData.paired_with'
db.add_column(u'finaid_financialaidreviewdata', 'paired_with',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.hotel_notes'
db.add_column(u'finaid_financialaidreviewdata', 'hotel_notes',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.travel_amount'
db.add_column(u'finaid_financialaidreviewdata', 'travel_amount',
self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=8, decimal_places=2),
keep_default=False)
# Adding field 'FinancialAidReviewData.tutorial_amount'
db.add_column(u'finaid_financialaidreviewdata', 'tutorial_amount',
self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=8, decimal_places=2),
keep_default=False)
# Adding field 'FinancialAidReviewData.registration_amount'
db.add_column(u'finaid_financialaidreviewdata', 'registration_amount',
self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=8, decimal_places=2),
keep_default=False)
# Adding field 'FinancialAidReviewData.grant_letter_sent'
db.add_column(u'finaid_financialaidreviewdata', 'grant_letter_sent',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.cash_check'
db.add_column(u'finaid_financialaidreviewdata', 'cash_check',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.notes'
db.add_column(u'finaid_financialaidreviewdata', 'notes',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.travel_signed'
db.add_column(u'finaid_financialaidreviewdata', 'travel_signed',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'FinancialAidReviewData.travel_cash_check'
db.add_column(u'finaid_financialaidreviewdata', 'travel_cash_check',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.travel_check_number'
db.add_column(u'finaid_financialaidreviewdata', 'travel_check_number',
self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.travel_preferred_disbursement'
db.add_column(u'finaid_financialaidreviewdata', 'travel_preferred_disbursement',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'FinancialAidReviewData.promo_code'
db.add_column(u'finaid_financialaidreviewdata', 'promo_code',
self.gf('django.db.models.fields.CharField')(default='', max_length=20, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FinancialAidReviewData.paired_with'
db.delete_column(u'finaid_financialaidreviewdata', 'paired_with_id')
# Deleting field 'FinancialAidReviewData.hotel_notes'
db.delete_column(u'finaid_financialaidreviewdata', 'hotel_notes')
# Deleting field 'FinancialAidReviewData.travel_amount'
db.delete_column(u'finaid_financialaidreviewdata', 'travel_amount')
# Deleting field 'FinancialAidReviewData.tutorial_amount'
db.delete_column(u'finaid_financialaidreviewdata', 'tutorial_amount')
# Deleting field 'FinancialAidReviewData.registration_amount'
db.delete_column(u'finaid_financialaidreviewdata', 'registration_amount')
# Deleting field 'FinancialAidReviewData.grant_letter_sent'
db.delete_column(u'finaid_financialaidreviewdata', 'grant_letter_sent')
# Deleting field 'FinancialAidReviewData.cash_check'
db.delete_column(u'finaid_financialaidreviewdata', 'cash_check')
# Deleting field 'FinancialAidReviewData.notes'
db.delete_column(u'finaid_financialaidreviewdata', 'notes')
# Deleting field 'FinancialAidReviewData.travel_signed'
db.delete_column(u'finaid_financialaidreviewdata', 'travel_signed')
# Deleting field 'FinancialAidReviewData.travel_cash_check'
db.delete_column(u'finaid_financialaidreviewdata', 'travel_cash_check')
# Deleting field 'FinancialAidReviewData.travel_check_number'
db.delete_column(u'finaid_financialaidreviewdata', 'travel_check_number')
# Deleting field 'FinancialAidReviewData.travel_preferred_disbursement'
db.delete_column(u'finaid_financialaidreviewdata', 'travel_preferred_disbursement')
# Deleting field 'FinancialAidReviewData.promo_code'
db.delete_column(u'finaid_financialaidreviewdata', 'promo_code')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'finaid.financialaidapplication': {
'Meta': {'object_name': 'FinancialAidApplication'},
'beginner_resources': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'experience_level': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'first_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hotel_arrival_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'hotel_departure_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'hotel_grant_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hotel_nights': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'involvement': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'portfolios': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'presented': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'presenting': ('django.db.models.fields.IntegerField', [], {}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'pyladies_grant_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registration_grant_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sex': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'travel_amount_requested': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '8', 'decimal_places': '2'}),
'travel_grant_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'travel_plans': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'tutorial_grant_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_of_python': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'financial_aid'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'want_to_learn': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'what_you_want': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'finaid.financialaidapplicationperiod': {
'Meta': {'object_name': 'FinancialAidApplicationPeriod'},
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {})
},
u'finaid.financialaidmessage': {
'Meta': {'ordering': "['submitted_at']", 'object_name': 'FinancialAidMessage'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': u"orm['finaid.FinancialAidApplication']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'finaid.financialaidreviewdata': {
'Meta': {'object_name': 'FinancialAidReviewData'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'review'", 'unique': 'True', 'to': u"orm['finaid.FinancialAidApplication']"}),
'cash_check': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'grant_letter_sent': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'hotel_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '8', 'decimal_places': '2'}),
'hotel_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'paired_with': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'promo_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'registration_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '8', 'decimal_places': '2'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'travel_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '8', 'decimal_places': '2'}),
'travel_cash_check': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'travel_check_number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'travel_preferred_disbursement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'travel_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tutorial_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '8', 'decimal_places': '2'})
}
}
complete_apps = ['finaid'] | {
"content_hash": "2c92e752bf8763493c8a603519bb6375",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 187,
"avg_line_length": 68.5158371040724,
"alnum_prop": 0.5982036719059569,
"repo_name": "alex/pycon",
"id": "54af990bb11e6e89fc49ff8bbedd09a1a1f9f988",
"size": "15166",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pycon/finaid/migrations/0008_auto__add_field_financialaidreviewdata_paired_with__add_field_financia.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Petition',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('title', models.CharField(max_length=100)),
('text', models.TextField()),
('deadline', models.DateTimeField()),
('status', models.CharField(default='V', choices=[('V', 'Voting'), ('A', 'Accepted'), ('D', 'Declined')], max_length=1)),
('created_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='petitions')),
('responsible', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL, related_name='responsible')),
],
),
]
| {
"content_hash": "b710fc134aa177a18f49c1f801191e84",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 137,
"avg_line_length": 40.96296296296296,
"alnum_prop": 0.5922242314647378,
"repo_name": "kpi-petitions/project-y",
"id": "a9a84bc9ce4a706adc14d805f0ea9c0544f7d046",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "petitions/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31107"
},
{
"name": "HTML",
"bytes": "47573"
},
{
"name": "JavaScript",
"bytes": "43884"
},
{
"name": "Python",
"bytes": "63051"
}
],
"symlink_target": ""
} |
import ipaddress
from netaddr import IPNetwork
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_abusix(SpiderFootPlugin):
meta = {
'name': "Abusix Mail Intelligence",
'summary': "Check if a netblock or IP address is in the Abusix Mail Intelligence blacklist.",
'flags': ['apikey'],
'useCases': ["Investigate", "Passive"],
'categories': ["Reputation Systems"],
'dataSource': {
'website': "https://abusix.org/",
'model': "FREE_AUTH_LIMITED",
'references': [
"https://abusix.com/products/abusix-mail-intelligence/",
"https://docs.abusix.com/105726-setup-abusix-mail-intelligence/ami%2Fsetup%2Fexample-queries",
"https://docs.abusix.com/105725-detailed-list-information/ami%2Freturn-codes",
],
'apiKeyInstructions': [
"Visit https://app.abusix.com/signup",
"Register a free account",
"Browse to 'Account Settings' page",
"The API key is listed on the 'Email protection' page."
],
'logo': "https://abusix.com/wp-content/uploads/2020/10/Footer_logo.png",
'description': "Abusix Mail Intelligence is an innovative set of blocklists (RBL/DNSBL) "
"that adds real-time threat data to your existing email protection. "
"Considered as the first line of defense, blocklists help to prevent email-borne threats "
"such as spam and malware from entering your network."
}
}
opts = {
'api_key': "",
'checkaffiliates': True,
'checkcohosts': True,
'netblocklookup': True,
'maxnetblock': 24,
'maxv6netblock': 120,
'subnetlookup': True,
'maxsubnet': 24,
'maxv6subnet': 120,
}
optdescs = {
'api_key': "Abusix Mail Intelligence API key.",
'checkaffiliates': "Apply checks to affiliates?",
'checkcohosts': "Apply checks to sites found to be co-hosted on the target's IP?",
'netblocklookup': "Look up all IPs on netblocks deemed to be owned by your target for possible blacklisted hosts on the same target subdomain/domain?",
'maxnetblock': "If looking up owned netblocks, the maximum netblock size to look up all IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
'maxv6netblock': "If looking up owned netblocks, the maximum IPv6 netblock size to look up all IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
'subnetlookup': "Look up all IPs on subnets which your target is a part of for blacklisting?",
'maxsubnet': "If looking up subnets, the maximum subnet size to look up all the IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
'maxv6subnet': "If looking up subnets, the maximum IPv6 subnet size to look up all the IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
}
results = None
errorState = False
checks = {
"127.0.0.2": "black",
"127.0.0.3": "black (composite/heuristic)",
"127.0.0.4": "exploit / authbl",
"127.0.0.5": "forged",
"127.0.0.6": "backscatter",
"127.0.0.11": "policy (generic rDNS)",
"127.0.0.12": "policy (missing rDNS)",
"127.0.0.100": "noip",
"127.0.1.1": "dblack",
"127.0.1.2": "dblack (Newly Observed Domain)",
"127.0.1.3": "dblack (Unshortened)",
"127.0.2.1": "white",
"127.0.3.1": "shorthash",
"127.0.3.2": "diskhash",
"127.0.4.1": "btc-wallets",
"127.0.5.1": "attachhash",
}
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.errorState = False
self.results = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return [
'IP_ADDRESS',
'IPV6_ADDRESS',
'AFFILIATE_IPADDR',
'AFFILIATE_IPV6_ADDRESS',
"NETBLOCK_MEMBER",
"NETBLOCKV6_MEMBER",
"NETBLOCK_OWNER",
"NETBLOCKV6_OWNER",
'INTERNET_NAME',
'AFFILIATE_INTERNET_NAME',
'CO_HOSTED_SITE',
]
def producedEvents(self):
return [
"BLACKLISTED_IPADDR",
"BLACKLISTED_AFFILIATE_IPADDR",
"BLACKLISTED_SUBNET",
"BLACKLISTED_NETBLOCK",
"BLACKLISTED_INTERNET_NAME",
"BLACKLISTED_AFFILIATE_INTERNET_NAME",
"BLACKLISTED_COHOST",
"MALICIOUS_IPADDR",
"MALICIOUS_AFFILIATE_IPADDR",
"MALICIOUS_NETBLOCK",
"MALICIOUS_SUBNET",
"MALICIOUS_INTERNET_NAME",
"MALICIOUS_AFFILIATE_INTERNET_NAME",
"MALICIOUS_COHOST",
]
def reverseIpAddress(self, ipaddr):
if not self.sf.validIP(ipaddr):
self.debug(f"Invalid IPv4 address {ipaddr}")
return None
return ipaddress.ip_address(ipaddr).reverse_pointer.replace('.in-addr.arpa', '')
def reverseIp6Address(self, ipaddr):
if not self.sf.validIP6(ipaddr):
self.debug(f"Invalid IPv6 address {ipaddr}")
return None
return ipaddress.ip_address(ipaddr).reverse_pointer.replace('.ip6.arpa', '')
def query(self, qaddr):
"""Query Abusix Mail Intelligence DNS.
Args:
            qaddr (str): Host name, IPv4 address, or IPv6 address.
Returns:
list: Abusix DNS entries
"""
if self.sf.validIP(qaddr):
lookup = f"{self.reverseIpAddress(qaddr)}.{self.opts['api_key']}.combined.mail.abusix.zone"
elif self.sf.validIP6(qaddr):
lookup = f"{self.reverseIp6Address(qaddr)}.{self.opts['api_key']}.combined.mail.abusix.zone"
else:
lookup = f"{qaddr}.{self.opts['api_key']}.combined.mail.abusix.zone"
self.debug(f"Checking Abusix Mail Intelligence blacklist: {lookup}")
try:
return self.sf.resolveHost(lookup)
except Exception as e:
self.debug(f"Abusix Mail Intelligence did not resolve {qaddr} / {lookup}: {e}")
return None
def handleEvent(self, event):
eventName = event.eventType
eventData = event.data
if self.errorState:
return
self.debug(f"Received event, {eventName}, from {event.module}")
if not self.opts['api_key']:
self.error(f"You enabled {self.__class__.__name__} but did not set an API key!")
self.errorState = True
return
if eventData in self.results:
return
self.results[eventData] = True
if eventName in ['AFFILIATE_IPADDR', 'AFFILIATE_IPV6_ADDRESS']:
if not self.opts.get('checkaffiliates', False):
return
malicious_type = "MALICIOUS_AFFILIATE_IPADDR"
blacklist_type = "BLACKLISTED_AFFILIATE_IPADDR"
elif eventName in ['IP_ADDRESS', 'IPV6_ADDRESS']:
malicious_type = "MALICIOUS_IPADDR"
blacklist_type = "BLACKLISTED_IPADDR"
elif eventName in ['NETBLOCK_MEMBER', 'NETBLOCKV6_MEMBER']:
if not self.opts['subnetlookup']:
return
if eventName == 'NETBLOCKV6_MEMBER':
max_subnet = self.opts['maxv6subnet']
else:
max_subnet = self.opts['maxsubnet']
if IPNetwork(eventData).prefixlen < max_subnet:
self.debug(f"Network size bigger than permitted: {IPNetwork(eventData).prefixlen} > {max_subnet}")
return
malicious_type = "MALICIOUS_SUBNET"
blacklist_type = "BLACKLISTED_SUBNET"
elif eventName in ['NETBLOCK_OWNER', 'NETBLOCKV6_OWNER']:
if not self.opts['netblocklookup']:
return
if eventName == 'NETBLOCKV6_OWNER':
max_netblock = self.opts['maxv6netblock']
else:
max_netblock = self.opts['maxnetblock']
if IPNetwork(eventData).prefixlen < max_netblock:
self.debug(f"Network size bigger than permitted: {IPNetwork(eventData).prefixlen} > {max_netblock}")
return
malicious_type = "MALICIOUS_NETBLOCK"
blacklist_type = "BLACKLISTED_NETBLOCK"
elif eventName == "INTERNET_NAME":
malicious_type = "MALICIOUS_INTERNET_NAME"
blacklist_type = "BLACKLISTED_INTERNET_NAME"
elif eventName == "AFFILIATE_INTERNET_NAME":
if not self.opts.get('checkaffiliates', False):
return
malicious_type = "MALICIOUS_AFFILIATE_INTERNET_NAME"
blacklist_type = "BLACKLISTED_AFFILIATE_INTERNET_NAME"
elif eventName == "CO_HOSTED_SITE":
if not self.opts.get('checkcohosts', False):
return
malicious_type = "MALICIOUS_COHOST"
blacklist_type = "BLACKLISTED_COHOST"
else:
self.debug(f"Unexpected event type {eventName}, skipping")
return
addrs = list()
if eventName.startswith("NETBLOCK"):
for addr in IPNetwork(eventData):
addrs.append(str(addr))
else:
addrs.append(eventData)
for addr in addrs:
if self.checkForStop():
return
if self.errorState:
return
res = self.query(addr)
self.results[addr] = True
if not res:
continue
self.debug(f"{addr} found in Abusix Mail Intelligence DNS")
for result in res:
k = str(result)
if k not in self.checks:
if 'mail.abusix.zone' not in result:
# This is an error. The "checks" dict may need to be updated.
self.error(f"Abusix Mail Intelligence resolved address {addr} to unknown IP address {result} not found in Abusix Mail Intelligence list.")
continue
text = f"Abusix Mail Intelligence - {self.checks[k]} [{addr}]\n<SFURL>https://lookup.abusix.com/search?q={addr}</SFURL>"
evt = SpiderFootEvent(blacklist_type, text, self.__name__, event)
self.notifyListeners(evt)
evt = SpiderFootEvent(malicious_type, text, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_abusix class
| {
"content_hash": "44596fea6f5b5169105f29aa6c01dea1",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 162,
"avg_line_length": 38.19565217391305,
"alnum_prop": 0.5688673875924872,
"repo_name": "smicallef/spiderfoot",
"id": "d55400901b27c2496dd21ade40b7c2a7ff95b32b",
"size": "11015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/sfp_abusix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from traitsui.api import View, Item
from pychron.hardware.core.abstract_device import AbstractDevice
# ============= standard library imports ========================
# ============= local library imports ==========================
class PidController(AbstractDevice):
def load_additional_args(self, config, **kw):
from pychron.hardware.eurotherm import Eurotherm
self._cdevice = Eurotherm(name="Eurotherm")
self._cdevice.load()
return True
# from traits.api import Instance
from traitsui.api import ButtonEditor
class DevelopmentPidController(PidController):
def get_process_value(self, **kw):
return self._cdevice.get_random_value()
def traits_view(self):
v = View(
Item("_cdevice", style="custom", show_label=False),
Item(
"scan_button",
show_label=False,
editor=ButtonEditor(label_value="scan_label"),
),
Item("graph", show_label=False, style="custom"),
)
return v
# ============= EOF =============================================
| {
"content_hash": "d129fd74cde76e7c4567b7269c6cc420",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 65,
"avg_line_length": 27.023255813953487,
"alnum_prop": 0.5567986230636833,
"repo_name": "USGSDenverPychron/pychron",
"id": "efeb7156032f44d0755c525370dcad129103521a",
"size": "1964",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/hardware/pid_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf import settings
from sentry.utils.services import LazyServiceWrapper
from .base import BaseTSDB # NOQA
from .dummy import DummyTSDB
backend = LazyServiceWrapper(
BaseTSDB, settings.SENTRY_TSDB, settings.SENTRY_TSDB_OPTIONS, (DummyTSDB, )
)
backend.expose(locals())
| {
"content_hash": "23ecff30f2f2cd6a57951e5c80a215d3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 25.384615384615383,
"alnum_prop": 0.7848484848484848,
"repo_name": "jean/sentry",
"id": "744c246b0fdda6fb94502644cca26af10f6aeaa1",
"size": "330",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/tsdb/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
} |
import subprocess
import os
from time import sleep
from datetime import datetime
import yaml
class StartupMinion:
def __init__(self, conf_file='config.yml'):
with open(conf_file, 'r') as f:
conf = yaml.safe_load(f)
self.servers_path = os.path.join(
conf['base_path'],
conf['atlauncher_dir'],
conf['servers_dir'],
)
self.servers = conf['servers']
self.launch_command = conf['launch_command']
def _start(self, server):
screen_title = 'mc_' + server
server_loc = os.path.join(self.servers_path, server)
daemonize_screen = "screen -dmS " + screen_title
stuffing = "'cd " + server_loc + " && " + self.launch_command + "\n'"
stuff_screen = "screen -S " + screen_title + " -X stuff " + stuffing
bash_command = daemonize_screen + " && " + stuff_screen
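        # Illustrative result (server name "myserver" and paths assumed):
        #   screen -dmS mc_myserver && screen -S mc_myserver -X stuff \
        #       'cd <servers_path>/myserver && <launch_command>\n'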
logfile = os.path.join(server_loc, "logs", "latest.log")
sleep(1)
print("Let's get this party started!")
print("Starting party for: " + server)
sleep(1)
print("Running command:\n" + bash_command)
print("This may take a few minutes. Be patient, please!")
sleep(1)
subprocess.Popen(['bash', '-c', bash_command])
finished = -1
time_counter = 0
while finished == -1:
sleep(0.25)
time_counter += 0.25
if time_counter % 10 == 0:
print("Waiting for server... (%ss)" % time_counter)
with open(logfile, "r") as fh:
data = fh.read()
now = datetime.now().strftime("%H:%M:%S")
finished = data.find(
"[" + now + "] [Server thread/INFO]: Done ("
)
print("Server's up and running!")
sleep(1)
print("Have fun!")
sleep(4)
def start(self, these_servers='all'):
if these_servers == 'all':
for server in self.servers:
self._start(server)
elif isinstance(these_servers, list):
for server in these_servers:
if server in self.servers:
self._start(server)
elif isinstance(these_servers, int):
self._start(self.servers[these_servers])
else:
print("Invalid choice. Supply either a server name that exists,"
+ "or the index of a server in the config.yml list. "
"Or just leave blank to start every server in the config.")
if __name__ == "__main__":
minion = StartupMinion()
minion.start()
| {
"content_hash": "85e56335cb3cdf4d51964f32e931b80f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 33.98701298701299,
"alnum_prop": 0.5303782957585021,
"repo_name": "tlake/mc_server_scripts",
"id": "63c20ddc6c52f710fcf75e42596030f5b1e84cac",
"size": "2617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "start_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6840"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ordenes', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='orden',
name='operador',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='orden',
name='tecnico',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='orden',
name='tienda',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='ordenes.Tienda', null=True),
),
migrations.AddField(
model_name='modelo',
name='marca',
field=models.ForeignKey(to='ordenes.Marca'),
),
migrations.AddField(
model_name='marca',
name='tienda',
field=models.ManyToManyField(to='ordenes.Tienda'),
),
]
| {
"content_hash": "b421a1008251fa1835bfbf8984ca896c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 140,
"avg_line_length": 33.21951219512195,
"alnum_prop": 0.5976505139500734,
"repo_name": "zaresdelweb/tecnoservicio",
"id": "8427047582161bc9e3d6e49a8994cfb9c0e913a9",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tecnoservicio/ordenes/migrations/0002_auto_20150511_1235.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "816496"
},
{
"name": "Groff",
"bytes": "11774"
},
{
"name": "HTML",
"bytes": "3141359"
},
{
"name": "JavaScript",
"bytes": "1675320"
},
{
"name": "Python",
"bytes": "373251"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy.optimize import newton
from scipy.special import logit
from sklearn.utils import assert_all_finite
from sklearn.utils.fixes import sp_version, parse_version
import pytest
from sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES
from sklearn.ensemble._hist_gradient_boosting.common import Y_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.utils._testing import skip_if_32bit
def get_derivatives_helper(loss):
"""Return get_gradients() and get_hessians() functions for a given loss.
"""
def get_gradients(y_true, raw_predictions):
# create gradients and hessians array, update inplace, and return
gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
loss.update_gradients_and_hessians(gradients, hessians, y_true,
raw_predictions, None)
return gradients
def get_hessians(y_true, raw_predictions):
# create gradients and hessians array, update inplace, and return
gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
loss.update_gradients_and_hessians(gradients, hessians, y_true,
raw_predictions, None)
if loss.__class__.__name__ == 'LeastSquares':
# hessians aren't updated because they're constant:
# the value is 1 (and not 2) because the loss is actually an half
# least squares loss.
hessians = np.full_like(raw_predictions, fill_value=1)
elif loss.__class__.__name__ == 'LeastAbsoluteDeviation':
# hessians aren't updated because they're constant
hessians = np.full_like(raw_predictions, fill_value=0)
return hessians
return get_gradients, get_hessians
@pytest.mark.parametrize('loss, x0, y_true', [
('least_squares', -2., 42),
('least_squares', 117., 1.05),
('least_squares', 0., 0.),
# The argmin of binary_crossentropy for y_true=0 and y_true=1 is resp. -inf
# and +inf due to logit, cf. "complete separation". Therefore, we use
# 0 < y_true < 1.
('binary_crossentropy', 0.3, 0.1),
('binary_crossentropy', -12, 0.2),
('binary_crossentropy', 30, 0.9),
('poisson', 12., 1.),
('poisson', 0., 2.),
('poisson', -22., 10.),
])
@pytest.mark.skipif(sp_version == parse_version('1.2.0'),
reason='bug in scipy 1.2.0, see scipy issue #9608')
@skip_if_32bit
def test_derivatives(loss, x0, y_true):
# Check that gradients are zero when the loss is minimized on a single
# value/sample using Halley's method with the first and second order
# derivatives computed by the Loss instance.
# Note that methods of Loss instances operate on arrays while the newton
# root finder expects a scalar or a one-element array for this purpose.
loss = _LOSSES[loss](sample_weight=None)
y_true = np.array([y_true], dtype=Y_DTYPE)
x0 = np.array([x0], dtype=Y_DTYPE).reshape(1, 1)
get_gradients, get_hessians = get_derivatives_helper(loss)
def func(x: np.ndarray) -> np.ndarray:
if isinstance(loss, _LOSSES['binary_crossentropy']):
# Subtract a constant term such that the binary cross entropy
# has its minimum at zero, which is needed for the newton method.
actual_min = loss.pointwise_loss(y_true, logit(y_true))
return loss.pointwise_loss(y_true, x) - actual_min
else:
return loss.pointwise_loss(y_true, x)
def fprime(x: np.ndarray) -> np.ndarray:
return get_gradients(y_true, x)
def fprime2(x: np.ndarray) -> np.ndarray:
return get_hessians(y_true, x)
optimum = newton(func, x0=x0, fprime=fprime, fprime2=fprime2,
maxiter=70, tol=2e-8)
# Need to ravel arrays because assert_allclose requires matching dimensions
y_true = y_true.ravel()
optimum = optimum.ravel()
assert_allclose(loss.inverse_link_function(optimum), y_true)
assert_allclose(func(optimum), 0, atol=1e-14)
assert_allclose(get_gradients(y_true, optimum), 0, atol=1e-7)
@pytest.mark.parametrize('loss, n_classes, prediction_dim', [
('least_squares', 0, 1),
('least_absolute_deviation', 0, 1),
('binary_crossentropy', 2, 1),
('categorical_crossentropy', 3, 3),
('poisson', 0, 1),
])
@pytest.mark.skipif(Y_DTYPE != np.float64,
reason='Need 64 bits float precision for numerical checks')
def test_numerical_gradients(loss, n_classes, prediction_dim, seed=0):
# Make sure gradients and hessians computed in the loss are correct, by
# comparing with their approximations computed with finite central
# differences.
# See https://en.wikipedia.org/wiki/Finite_difference.
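    # Concretely (illustrative, mirroring the computations below), with a
    # perturbation eps applied only to the first tree's raw predictions:
    #   gradient ~ (f(x + eps/2) - f(x - eps/2)) / eps
    #   hessian  ~ (f(x + eps) + f(x - eps) - 2 * f(x)) / eps**2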
rng = np.random.RandomState(seed)
n_samples = 100
if loss in ('least_squares', 'least_absolute_deviation'):
y_true = rng.normal(size=n_samples).astype(Y_DTYPE)
    elif loss == 'poisson':
y_true = rng.poisson(size=n_samples).astype(Y_DTYPE)
else:
y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)
raw_predictions = rng.normal(
size=(prediction_dim, n_samples)
).astype(Y_DTYPE)
loss = _LOSSES[loss](sample_weight=None)
get_gradients, get_hessians = get_derivatives_helper(loss)
# only take gradients and hessians of first tree / class.
gradients = get_gradients(y_true, raw_predictions)[0, :].ravel()
hessians = get_hessians(y_true, raw_predictions)[0, :].ravel()
# Approximate gradients
# For multiclass loss, we should only change the predictions of one tree
# (here the first), hence the use of offset[0, :] += eps
# As a softmax is computed, offsetting the whole array by a constant would
# have no effect on the probabilities, and thus on the loss
eps = 1e-9
offset = np.zeros_like(raw_predictions)
offset[0, :] = eps
f_plus_eps = loss.pointwise_loss(y_true, raw_predictions + offset / 2)
f_minus_eps = loss.pointwise_loss(y_true, raw_predictions - offset / 2)
numerical_gradients = (f_plus_eps - f_minus_eps) / eps
# Approximate hessians
eps = 1e-4 # need big enough eps as we divide by its square
offset[0, :] = eps
f_plus_eps = loss.pointwise_loss(y_true, raw_predictions + offset)
f_minus_eps = loss.pointwise_loss(y_true, raw_predictions - offset)
f = loss.pointwise_loss(y_true, raw_predictions)
numerical_hessians = (f_plus_eps + f_minus_eps - 2 * f) / eps**2
assert_allclose(numerical_gradients, gradients, rtol=1e-4, atol=1e-7)
assert_allclose(numerical_hessians, hessians, rtol=1e-4, atol=1e-7)
def test_baseline_least_squares():
rng = np.random.RandomState(0)
loss = _LOSSES['least_squares'](sample_weight=None)
y_train = rng.normal(size=100)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert baseline_prediction.shape == tuple() # scalar
assert baseline_prediction.dtype == y_train.dtype
# Make sure baseline prediction is the mean of all targets
assert_almost_equal(baseline_prediction, y_train.mean())
assert np.allclose(loss.inverse_link_function(baseline_prediction),
baseline_prediction)
def test_baseline_least_absolute_deviation():
rng = np.random.RandomState(0)
loss = _LOSSES['least_absolute_deviation'](sample_weight=None)
y_train = rng.normal(size=100)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert baseline_prediction.shape == tuple() # scalar
assert baseline_prediction.dtype == y_train.dtype
# Make sure baseline prediction is the median of all targets
assert np.allclose(loss.inverse_link_function(baseline_prediction),
baseline_prediction)
assert baseline_prediction == pytest.approx(np.median(y_train))
def test_baseline_poisson():
rng = np.random.RandomState(0)
loss = _LOSSES['poisson'](sample_weight=None)
y_train = rng.poisson(size=100).astype(np.float64)
# Sanity check, make sure at least one sample is non-zero so we don't take
# log(0)
assert y_train.sum() > 0
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert np.isscalar(baseline_prediction)
assert baseline_prediction.dtype == y_train.dtype
assert_all_finite(baseline_prediction)
# Make sure baseline prediction produces the log of the mean of all targets
assert_almost_equal(np.log(y_train.mean()), baseline_prediction)
# Test baseline for y_true = 0
y_train.fill(0.)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert_all_finite(baseline_prediction)
def test_baseline_binary_crossentropy():
rng = np.random.RandomState(0)
loss = _LOSSES['binary_crossentropy'](sample_weight=None)
for y_train in (np.zeros(shape=100), np.ones(shape=100)):
y_train = y_train.astype(np.float64)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert_all_finite(baseline_prediction)
assert np.allclose(loss.inverse_link_function(baseline_prediction),
y_train[0])
# Make sure baseline prediction is equal to link_function(p), where p
# is the proba of the positive class. We want predict_proba() to return p,
# and by definition
# p = inverse_link_function(raw_prediction) = sigmoid(raw_prediction)
# So we want raw_prediction = link_function(p) = log(p / (1 - p))
y_train = rng.randint(0, 2, size=100).astype(np.float64)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert baseline_prediction.shape == tuple() # scalar
assert baseline_prediction.dtype == y_train.dtype
p = y_train.mean()
assert np.allclose(baseline_prediction, np.log(p / (1 - p)))
def test_baseline_categorical_crossentropy():
rng = np.random.RandomState(0)
prediction_dim = 4
loss = _LOSSES['categorical_crossentropy'](sample_weight=None)
for y_train in (np.zeros(shape=100), np.ones(shape=100)):
y_train = y_train.astype(np.float64)
baseline_prediction = loss.get_baseline_prediction(y_train, None,
prediction_dim)
assert baseline_prediction.dtype == y_train.dtype
assert_all_finite(baseline_prediction)
# Same logic as for above test. Here inverse_link_function = softmax and
# link_function = log
y_train = rng.randint(0, prediction_dim + 1, size=100).astype(np.float32)
baseline_prediction = loss.get_baseline_prediction(y_train, None,
prediction_dim)
assert baseline_prediction.shape == (prediction_dim, 1)
for k in range(prediction_dim):
p = (y_train == k).mean()
assert np.allclose(baseline_prediction[k, :], np.log(p))
@pytest.mark.parametrize('loss, problem', [
('least_squares', 'regression'),
('least_absolute_deviation', 'regression'),
('binary_crossentropy', 'classification'),
('categorical_crossentropy', 'classification'),
('poisson', 'poisson_regression'),
])
@pytest.mark.parametrize('sample_weight', ['ones', 'random'])
def test_sample_weight_multiplies_gradients(loss, problem, sample_weight):
# Make sure that passing sample weights to the gradient and hessians
# computation methods is equivalent to multiplying by the weights.
rng = np.random.RandomState(42)
n_samples = 1000
if loss == 'categorical_crossentropy':
n_classes = prediction_dim = 3
else:
n_classes = prediction_dim = 1
if problem == 'regression':
y_true = rng.normal(size=n_samples).astype(Y_DTYPE)
elif problem == 'poisson_regression':
y_true = rng.poisson(size=n_samples).astype(Y_DTYPE)
else:
y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)
if sample_weight == 'ones':
sample_weight = np.ones(shape=n_samples, dtype=Y_DTYPE)
else:
sample_weight = rng.normal(size=n_samples).astype(Y_DTYPE)
loss_ = _LOSSES[loss](sample_weight=sample_weight)
baseline_prediction = loss_.get_baseline_prediction(
y_true, None, prediction_dim
)
raw_predictions = np.zeros(shape=(prediction_dim, n_samples),
dtype=baseline_prediction.dtype)
raw_predictions += baseline_prediction
gradients = np.empty(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
hessians = np.ones(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
loss_.update_gradients_and_hessians(gradients, hessians, y_true,
raw_predictions, None)
gradients_sw = np.empty(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
hessians_sw = np.ones(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
loss_.update_gradients_and_hessians(gradients_sw, hessians_sw, y_true,
raw_predictions, sample_weight)
assert np.allclose(gradients * sample_weight, gradients_sw)
assert np.allclose(hessians * sample_weight, hessians_sw)
def test_init_gradient_and_hessians_sample_weight():
# Make sure that passing sample_weight to a loss correctly influences the
# hessians_are_constant attribute, and consequently the shape of the
# hessians array.
prediction_dim = 2
n_samples = 5
sample_weight = None
loss = _LOSSES['least_squares'](sample_weight=sample_weight)
_, hessians = loss.init_gradients_and_hessians(
n_samples=n_samples, prediction_dim=prediction_dim,
sample_weight=None)
assert loss.hessians_are_constant
assert hessians.shape == (1, 1)
sample_weight = np.ones(n_samples)
loss = _LOSSES['least_squares'](sample_weight=sample_weight)
_, hessians = loss.init_gradients_and_hessians(
n_samples=n_samples, prediction_dim=prediction_dim,
sample_weight=sample_weight)
assert not loss.hessians_are_constant
assert hessians.shape == (prediction_dim, n_samples)
| {
"content_hash": "0b0d3465d0226e8442cb143108c2ac57",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 79,
"avg_line_length": 42.98198198198198,
"alnum_prop": 0.6636624048068189,
"repo_name": "bnaul/scikit-learn",
"id": "221b94183a7ffd79994ba83f9d221297993df611",
"size": "14313",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/_hist_gradient_boosting/tests/test_loss.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, print_function
"""
Created on Fri Mar 8 23:14:02 CET 2013
"""
import os.path
import collections
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinitio.pseudos import *
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
def ref_file(filename):
return os.path.join(_test_dir, filename)
def ref_files(*filenames):
return list(map(ref_file, filenames))
class PseudoTestCase(PymatgenTest):
def setUp(self):
nc_pseudo_fnames = collections.defaultdict(list)
nc_pseudo_fnames["Si"] = ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi")
self.nc_pseudos = collections.defaultdict(list)
for (symbol, fnames) in nc_pseudo_fnames.items():
for fname in fnames:
root, ext = os.path.splitext(fname)
pseudo = Pseudo.from_file(ref_file(fname))
self.nc_pseudos[symbol].append(pseudo)
# Save the pseudo as instance attribute whose name
# is constructed with the rule: symbol_ppformat
attr_name = symbol + "_" + ext[1:]
if hasattr(self, attr_name):
                    raise RuntimeError("self already has the attribute %s" % attr_name)
setattr(self, attr_name, pseudo)
def test_nc_pseudos(self):
"""Test norm-conserving pseudopotentials"""
for (symbol, pseudos) in self.nc_pseudos.items():
for pseudo in pseudos:
print(repr(pseudo))
print(pseudo)
self.assertTrue(pseudo.isnc)
self.assertFalse(pseudo.ispaw)
self.assertEqual(pseudo.Z, 14)
self.assertEqual(pseudo.symbol, symbol)
self.assertEqual(pseudo.Z_val, 4)
self.assertGreaterEqual(pseudo.nlcc_radius, 0.0)
print(pseudo.as_dict())
self.assertPMGSONable(pseudo)
# Test pickle
self.serialize_with_pickle(pseudo, test_eq=False)
# HGH pseudos
pseudo = self.Si_hgh
self.assertFalse(pseudo.has_nlcc)
self.assertEqual(pseudo.l_max, 1)
self.assertEqual(pseudo.l_local, 0)
# TM pseudos
pseudo = self.Si_pspnc
self.assertTrue(pseudo.has_nlcc)
self.assertEqual(pseudo.l_max, 2)
self.assertEqual(pseudo.l_local, 2)
# FHI pseudos
pseudo = self.Si_fhi
self.assertFalse(pseudo.has_nlcc)
self.assertEqual(pseudo.l_max, 3)
self.assertEqual(pseudo.l_local, 2)
# Test PseudoTable.
table = PseudoTable(self.nc_pseudos["Si"])
print(repr(table))
print(table)
self.assertTrue(table.allnc)
self.assertTrue(not table.allpaw)
self.assertFalse(not table.is_complete)
assert len(table) == 3
assert len(table[14]) == 3
assert len(table.select_symbols("Si")) == 3
assert table.zlist == [14]
# Test pickle
self.serialize_with_pickle(table, test_eq=False)
def test_pawxml_pseudos(self):
"""Test O.GGA_PBE-JTH-paw.xml."""
oxygen = Pseudo.from_file(ref_file("O.GGA_PBE-JTH-paw.xml"))
print(repr(oxygen))
print(oxygen)
print(oxygen.as_dict())
self.assertTrue(oxygen.ispaw)
self.assertTrue(oxygen.symbol == "O" and
(oxygen.Z, oxygen.core, oxygen.valence) == (8, 2, 6),
oxygen.Z_val == 6,
)
self.assert_almost_equal(oxygen.paw_radius, 1.4146523028)
# Test pickle
new_objs = self.serialize_with_pickle(oxygen, test_eq=False)
for o in new_objs:
print(repr(o))
print(o)
self.assertTrue(o.ispaw)
self.assertTrue(o.symbol == "O" and
(o.Z, o.core, o.valence) == (8, 2, 6),
o.Z_val == 6,
)
self.assert_almost_equal(o.paw_radius, 1.4146523028)
def test_ncvpsp_pseudo(self):
"""
        Test the ONCVPSP Ge pseudo
"""
ger = Pseudo.from_file(ref_file("ge.oncvpsp"))
print(repr(ger))
print(ger)
print(ger.as_dict())
self.assertTrue(ger.symbol == "Ge")
self.assert_equal(ger.Z, 32.0)
self.assert_equal(ger.Z_val, 4.0)
self.assertTrue(ger.isnc)
self.assertFalse(ger.ispaw)
self.assert_equal(ger.l_max, 2)
self.assert_equal(ger.l_local, 4)
self.assert_equal(ger.rcore, None)
class PseudoTableTest(PymatgenTest):
def test_methods(self):
"""Test PseudoTable methods"""
table = PseudoTable(ref_files("14si.pspnc", "14si.4.hgh", "14-Si.LDA.fhi"))
print(table)
assert len(table) == 3
for pseudo in table:
assert pseudo.isnc
assert table.allnc and not table.allpaw
assert table.zlist == [14]
# Data persistence
self.serialize_with_pickle(table, test_eq=False)
#d = table.as_dict()
#PseudoTable.from_dict(d)
#self.assertPMGSONable(table)
selected = table.select_symbols("Si")
assert len(selected) == len(table) and selected.__class__ is table.__class__
with self.assertRaises(ValueError):
table.pseudos_with_symbols("Si")
if __name__ == "__main__":
import unittest
unittest.main()
| {
"content_hash": "abafd461f86e8f63b5a05df75fe48301",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 88,
"avg_line_length": 32.293785310734464,
"alnum_prop": 0.5472358292512246,
"repo_name": "sonium0/pymatgen",
"id": "078b80906054c45737694993fdfb79b6db41e51f",
"size": "5826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/abinitio/tests/test_pseudos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3590333"
}
],
"symlink_target": ""
} |
import pytest
import unittest
from modules.sfp_searchcode import sfp_searchcode
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleIntegrationCodesearch(unittest.TestCase):
@unittest.skip("todo")
def test_handleEvent(self):
sf = SpiderFoot(self.default_options)
module = sfp_searchcode()
module.setup(sf, dict())
target_value = 'spiderfoot.net'
target_type = 'DOMAIN_NAME'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
| {
"content_hash": "37491cdf056f69e2b2f86198221c87ce",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 27.28125,
"alnum_prop": 0.6769759450171822,
"repo_name": "smicallef/spiderfoot",
"id": "ba27394d4e0700701f52e7e4051f657d2bd4124c",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration/modules/test_sfp_searchcode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
"""Tests for features."""
import copy
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from google.protobuf import text_format
from s2clientprotocol import sc2api_pb2 as sc_pb
# Heavily trimmed, so this is useful for testing actions, but not observations.
observation_text_proto = """
player_common {
player_id: 1
minerals: 0
vespene: 0
food_cap: 10
food_used: 0
food_army: 0
food_workers: 0
idle_worker_count: 0
army_count: 0
warp_gate_count: 0
larva_count: 0
}
game_loop: 20
"""
RECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))
SQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)
class AvailableActionsTest(absltest.TestCase):
always_expected = {
"no_op", "move_camera", "select_point", "select_rect",
"select_control_group"
}
def setUp(self):
super(AvailableActionsTest, self).setUp()
self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())
self.hideSpecificActions(True)
def hideSpecificActions(self, hide_specific_actions): # pylint: disable=invalid-name
self.features = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=hide_specific_actions))
def assertAvail(self, expected):
actual = self.features.available_actions(self.obs)
actual_names = {actions.FUNCTIONS[i].name for i in actual}
self.assertEqual(actual_names, set(expected) | self.always_expected)
def testAlways(self):
self.assertAvail([])
def testSelectUnit(self):
self.obs.ui_data.multi.units.add(unit_type=1)
self.assertAvail(["select_unit"])
  def testSelectIdleWorker(self):
self.obs.player_common.idle_worker_count = 1
self.assertAvail(["select_idle_worker"])
def testSelectArmy(self):
self.obs.player_common.army_count = 3
self.assertAvail(["select_army"])
def testSelectWarpGates(self):
self.obs.player_common.warp_gate_count = 1
self.assertAvail(["select_warp_gates"])
def testSelectLarva(self):
self.obs.player_common.larva_count = 2
self.assertAvail(["select_larva"])
def testQuick(self):
self.obs.abilities.add(ability_id=32)
self.assertAvail(["Effect_Salvage_quick"])
def testScreen(self):
self.obs.abilities.add(ability_id=326, requires_point=True)
self.assertAvail(["Build_SensorTower_screen"])
def testScreenMinimap(self):
self.obs.abilities.add(ability_id=17, requires_point=True)
self.assertAvail(["Patrol_screen", "Patrol_minimap"])
def testScreenAutocast(self):
self.obs.abilities.add(ability_id=386, requires_point=True)
self.assertAvail(["Effect_Heal_screen", "Effect_Heal_autocast"])
def testScreenQuick(self):
a = self.obs.abilities.add(ability_id=421)
self.hideSpecificActions(True)
a.requires_point = False
self.assertAvail(["Build_TechLab_quick"])
a.requires_point = True
self.assertAvail(["Build_TechLab_screen"])
self.hideSpecificActions(False)
a.requires_point = False
self.assertAvail(["Build_TechLab_Barracks_quick", "Build_TechLab_quick"])
a.requires_point = True
self.assertAvail(["Build_TechLab_Barracks_screen", "Build_TechLab_screen"])
def testGeneral(self):
self.obs.abilities.add(ability_id=1374)
self.hideSpecificActions(False)
self.assertAvail(["BurrowDown_quick", "BurrowDown_Baneling_quick"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowDown_quick"])
def testGeneralType(self):
a = self.obs.abilities.add(ability_id=1376)
self.hideSpecificActions(False)
self.assertAvail(["BurrowUp_quick", "BurrowUp_Baneling_quick",
"BurrowUp_autocast", "BurrowUp_Baneling_autocast"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowUp_quick", "BurrowUp_autocast"])
a.ability_id = 2110
self.hideSpecificActions(False)
self.assertAvail(["BurrowUp_quick", "BurrowUp_Lurker_quick"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowUp_quick"])
def testMany(self):
add = [
(23, True), # Attack
(318, True), # Build_CommandCenter
(320, True), # Build_Refinery
(319, True), # Build_SupplyDepot
(316, True), # Effect_Repair_SCV
(295, True), # Harvest_Gather_SCV
(16, True), # Move
(17, True), # Patrol
(4, False), # Stop
]
for a, r in add:
self.obs.abilities.add(ability_id=a, requires_point=r)
self.hideSpecificActions(False)
self.assertAvail([
"Attack_Attack_minimap",
"Attack_Attack_screen",
"Attack_minimap",
"Attack_screen",
"Build_CommandCenter_screen",
"Build_Refinery_screen",
"Build_SupplyDepot_screen",
"Effect_Repair_screen",
"Effect_Repair_autocast",
"Effect_Repair_SCV_autocast",
"Effect_Repair_SCV_screen",
"Harvest_Gather_screen",
"Harvest_Gather_SCV_screen",
"Move_minimap",
"Move_screen",
"Move_Move_minimap",
"Move_Move_screen",
"Patrol_minimap",
"Patrol_screen",
"Patrol_Patrol_minimap",
"Patrol_Patrol_screen",
"Stop_quick",
"Stop_Stop_quick"
])
self.hideSpecificActions(True)
self.assertAvail([
"Attack_minimap",
"Attack_screen",
"Build_CommandCenter_screen",
"Build_Refinery_screen",
"Build_SupplyDepot_screen",
"Effect_Repair_screen",
"Effect_Repair_autocast",
"Harvest_Gather_screen",
"Move_minimap",
"Move_screen",
"Patrol_minimap",
"Patrol_screen",
"Stop_quick",
])
class ToPointTest(absltest.TestCase):
def testIntAsString(self):
value = features._to_point("32")
self.assertEqual(value, point.Point(32, 32))
def testIntStringTwoTuple(self):
value = features._to_point(("32", 64))
self.assertEqual(value, point.Point(32, 64))
def testNoneInputReturnsNoneOutput(self):
with self.assertRaises(AssertionError):
features._to_point(None)
def testNoneAsFirstElementOfTupleRaises(self):
with self.assertRaises(TypeError):
features._to_point((None, 32))
def testNoneAsSecondElementOfTupleRaises(self):
with self.assertRaises(TypeError):
features._to_point((32, None))
def testSingletonTupleRaises(self):
with self.assertRaises(ValueError):
features._to_point((32,))
def testThreeTupleRaises(self):
with self.assertRaises(ValueError):
features._to_point((32, 32, 32))
class DimensionsTest(absltest.TestCase):
def testScreenSizeWithoutMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=84)
def testScreenWidthWithoutHeightRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(84, 0), minimap=64)
def testScreenWidthHeightWithoutMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(84, 80))
def testMinimapWidthAndHeightWithoutScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(minimap=(64, 67))
def testNoneNoneRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=None, minimap=None)
def testSingularZeroesRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=0, minimap=0)
def testTwoZeroesRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(0, 0), minimap=(0, 0))
def testThreeTupleScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(1, 2, 3), minimap=32)
def testThreeTupleMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=(1, 2, 3))
def testNegativeScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=-64, minimap=32)
def testNegativeMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=-32)
def testNegativeScreenTupleRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(-64, -64), minimap=32)
def testNegativeMinimapTupleRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=(-32, -32))
def testEquality(self):
self.assertEqual(features.Dimensions(screen=64, minimap=64),
features.Dimensions(screen=64, minimap=64))
self.assertNotEqual(features.Dimensions(screen=64, minimap=64),
features.Dimensions(screen=64, minimap=32))
self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)
class TestParseAgentInterfaceFormat(parameterized.TestCase):
def test_no_arguments_raises(self):
with self.assertRaises(ValueError):
features.parse_agent_interface_format()
@parameterized.parameters((32, None), (None, 32))
def test_invalid_feature_combinations_raise(self, screen, minimap):
with self.assertRaises(ValueError):
features.parse_agent_interface_format(
feature_screen=screen,
feature_minimap=minimap)
def test_valid_feature_specification_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24))
self.assertEqual(
agent_interface_format.feature_dimensions.screen,
point.Point(32, 32))
self.assertEqual(
agent_interface_format.feature_dimensions.minimap,
point.Point(24, 24))
@parameterized.parameters((32, None), (None, 32), (32, 64))
def test_invalid_minimap_combinations_raise(self, screen, minimap):
with self.assertRaises(ValueError):
features.parse_agent_interface_format(
rgb_screen=screen,
rgb_minimap=minimap)
def test_valid_minimap_specification_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
rgb_screen=32,
rgb_minimap=(24, 24))
self.assertEqual(
agent_interface_format.rgb_dimensions.screen,
point.Point(32, 32))
self.assertEqual(
agent_interface_format.rgb_dimensions.minimap,
point.Point(24, 24))
def test_invalid_action_space_raises(self):
with self.assertRaises(KeyError):
features.parse_agent_interface_format(
feature_screen=64,
feature_minimap=64,
action_space="UNKNOWN_ACTION_SPACE")
@parameterized.parameters(actions.ActionSpace.__members__.keys())
def test_valid_action_space_is_parsed(self, action_space):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
rgb_screen=64,
rgb_minimap=(48, 48),
use_raw_units=True,
action_space=action_space)
self.assertEqual(
agent_interface_format.action_space,
actions.ActionSpace[action_space])
def test_camera_width_world_units_are_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
camera_width_world_units=77)
self.assertEqual(agent_interface_format.camera_width_world_units, 77)
def test_use_feature_units_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
use_feature_units=True)
self.assertEqual(agent_interface_format.use_feature_units, True)
class FeaturesTest(absltest.TestCase):
def testFunctionsIdsAreConsistent(self):
for i, f in enumerate(actions.FUNCTIONS):
self.assertEqual(i, f.id, "id doesn't match for %s" % f.id)
def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):
for ability_id, funcs in actions.ABILITY_IDS.items():
self.assertLen({f.general_id for f in funcs}, 1,
"Multiple generals for %s" % ability_id)
def testValidFunctionsAreConsistent(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
valid_funcs = feats.action_spec()
for func_def in valid_funcs.functions:
func = actions.FUNCTIONS[func_def.id]
self.assertEqual(func_def.id, func.id)
self.assertEqual(func_def.name, func.name)
self.assertEqual(len(func_def.args), len(func.args)) # pylint: disable=g-generic-assert
def gen_random_function_call(self, action_spec, func_id):
args = [[numpy.random.randint(0, size) for size in arg.sizes] # pylint: disable=g-complex-comprehension
for arg in action_spec.functions[func_id].args]
return actions.FunctionCall(func_id, args)
def testIdsMatchIndex(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
action_spec = feats.action_spec()
for func_index, func_def in enumerate(action_spec.functions):
self.assertEqual(func_index, func_def.id)
for type_index, type_def in enumerate(action_spec.types):
self.assertEqual(type_index, type_def.id)
def testReversingUnknownAction(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=False))
sc2_action = sc_pb.Action()
sc2_action.action_feature_layer.unit_command.ability_id = 6 # Cheer
func_call = feats.reverse_action(sc2_action)
self.assertEqual(func_call.function, 0) # No-op
def testSpecificActionsAreReversible(self):
"""Test that the `transform_action` and `reverse_action` are inverses."""
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=False))
action_spec = feats.action_spec()
for func_def in action_spec.functions:
for _ in range(10):
func_call = self.gen_random_function_call(action_spec, func_def.id)
sc2_action = feats.transform_action(
None, func_call, skip_available=True)
func_call2 = feats.reverse_action(sc2_action)
sc2_action2 = feats.transform_action(
None, func_call2, skip_available=True)
if func_def.id == actions.FUNCTIONS.select_rect.id:
# Need to check this one manually since the same rect can be
# defined in multiple ways.
def rect(a):
return point.Rect(point.Point(*a[1]).floor(),
point.Point(*a[2]).floor())
self.assertEqual(func_call.function, func_call2.function)
self.assertEqual(len(func_call.arguments), len(func_call2.arguments)) # pylint: disable=g-generic-assert
self.assertEqual(func_call.arguments[0], func_call2.arguments[0])
self.assertEqual(rect(func_call.arguments),
rect(func_call2.arguments))
else:
self.assertEqual(func_call, func_call2, msg=sc2_action)
self.assertEqual(sc2_action, sc2_action2)
def testRawActionUnitTags(self):
feats = features.Features(
features.AgentInterfaceFormat(
use_raw_units=True,
action_space=actions.ActionSpace.RAW),
map_size=point.Point(100, 100))
tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]
ntags = numpy.array(tags, dtype=numpy.int64)
tag = tags[0]
ntag = numpy.array(tag, dtype=numpy.int64)
def transform(fn, *args):
func_call = actions.RAW_FUNCTIONS[fn]("now", *args)
proto = feats.transform_action(None, func_call, skip_available=True)
return proto.action_raw.unit_command
self.assertEqual(transform("Attack_pt", tag, [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", ntag, [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", [tag], [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", [ntag], [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", tags, [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_pt", ntags, [15, 20]).unit_tags, tags)
# Weird, but needed for backwards compatibility
self.assertEqual(transform("Attack_pt", [tags], [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_pt", [ntags], [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_unit", tag, tag).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, ntag).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, [tag]).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, [ntag]).target_unit_tag, tag)
def testCanPickleSpecs(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=SQUARE_DIMENSIONS))
action_spec = feats.action_spec()
observation_spec = feats.observation_spec()
self.assertEqual(action_spec, pickle.loads(pickle.dumps(action_spec)))
self.assertEqual(observation_spec,
pickle.loads(pickle.dumps(observation_spec)))
def testCanPickleFunctionCall(self):
func = actions.FUNCTIONS.select_point("select", [1, 2])
self.assertEqual(func, pickle.loads(pickle.dumps(func)))
def testCanDeepcopyNumpyFunctionCall(self):
arguments = [numpy.float32] * len(actions.Arguments._fields)
dtypes = actions.FunctionCall(
function=numpy.float32,
arguments=actions.Arguments(*arguments))
self.assertEqual(dtypes, copy.deepcopy(dtypes))
def testSizeConstructors(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=SQUARE_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 84))
self.assertEqual(spec.types.screen2.sizes, (84, 84))
self.assertEqual(spec.types.minimap.sizes, (64, 64))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
# Missing one or the other of game_info and dimensions.
with self.assertRaises(ValueError):
features.Features()
# Resolution/action space mismatch.
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
action_space=actions.ActionSpace.RGB))
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
rgb_dimensions=RECTANGULAR_DIMENSIONS,
action_space=actions.ActionSpace.FEATURES))
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=RECTANGULAR_DIMENSIONS))
def testFlRgbActionSpec(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.FEATURES))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.RGB))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (128, 132))
self.assertEqual(spec.types.screen2.sizes, (128, 132))
self.assertEqual(spec.types.minimap.sizes, (74, 77))
def testFlRgbObservationSpec(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.FEATURES))
obs_spec = feats.observation_spec()
self.assertEqual(obs_spec["feature_screen"], # pylint: disable=g-generic-assert
(len(features.SCREEN_FEATURES), 80, 84))
self.assertEqual(obs_spec["feature_minimap"], # pylint: disable=g-generic-assert
(len(features.MINIMAP_FEATURES), 67, 64))
self.assertEqual(obs_spec["rgb_screen"], (132, 128, 3))
self.assertEqual(obs_spec["rgb_minimap"], (77, 74, 3))
if __name__ == "__main__":
absltest.main()
| {
"content_hash": "af43e7040ec01fb56ca3be18fe03d06f",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 115,
"avg_line_length": 36.66840277777778,
"alnum_prop": 0.6882723355901709,
"repo_name": "deepmind/pysc2",
"id": "4a924fcf7af01bb8eb2987dcd22cc53b3dbc4573",
"size": "21736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysc2/lib/features_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "733866"
},
{
"name": "Starlark",
"bytes": "42723"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from abc import ABCMeta, abstractmethod
from BeautifulSoup import BeautifulSoup as BS
from argparse import ArgumentParser
from unidecode import unidecode
import itertools
import urllib2
import urllib
import json
import xmltodict
import os
import sys
# ------------------------------------------------------------------------------
class BibliographyMiner:
"""Basic structure for a data miner."""
__metaclass__ = ABCMeta
@abstractmethod
    def mine(self, iden):
pass
# ------------------------------------------------------------------------------
CACHE_DIRECTORY = "cache/"
def url_to_cache_path(url):
"""Returns a local cache path from a web url"""
if url.find("http://") != -1:
url = url[7:]
return CACHE_DIRECTORY + url
def read_cache(url):
"""Returns the cached string for url or None if url is not cached"""
path = url_to_cache_path(url)
try:
return open(path, "r").read()
except IOError:
return None
def cache_page(url, page):
"""Caches the given page string from the given url"""
path = url_to_cache_path(url)
directory = os.path.dirname(path)
# Create the cache directory if necessary
if not os.path.exists(directory):
os.makedirs(directory)
# Write the page to the cache
f = open(path,'w')
print(page, file=f)
f.close()
def url_read(url):
"""Returns a string of the data from the given url.
Checks the local cache before making a web request.
"""
page_str = read_cache(url)
if not page_str:
print("reading (web):", url, file=sys.stderr)
page_str = urllib2.urlopen(url).read()
cache_page(url,page_str)
else:
print("reading (cache):", url, file=sys.stderr)
return page_str
# ------------------------------------------------------------------------------
DBLP_LINK_FORMAT = "http://www.informatik.uni-trier.de/~ley/db/conf/{name}/index.html"
DBLP_YEAR_LINK_FLAG = "http://dblp.uni-trier.de/img/venues.dark.hollow.16x16.png"
class DBLPMiner(BibliographyMiner):
"""Mines bibliographical data from the DBLP cs journal database."""
def mine(self, iden, filename=None, find_citations=True, limit=30, skip=0):
# Open output file
filename = filename if filename else iden + '.dat'
fout = open(filename, 'w+')
# Generate url to mine
url = DBLP_LINK_FORMAT.format(name = iden)
## Extract the layers of urls on the dblp website
# Get the various conference year urls
year_urls = self._extract_year_urls(url)
for year_url in year_urls[skip : skip + limit]:
# Get the urls for the papers in the given conference
paper_xml_urls = self._extract_paper_xml_urls(year_url)
# Get the raw xml for the papers
try:
xml_dicts = map (lambda x : self._extract_xml_from_url(x), paper_xml_urls)
            except:
                print("failure reading paper xml urls:", paper_xml_urls, file=sys.stderr)
                continue
            # Strip the useless wrapper elements from the dblp bibliography xml
f_xml_dicts = filter(lambda x : isinstance(x['title'], unicode),
map(lambda x : x.values()[0].values()[0], xml_dicts))
            # Kick off a request to CiteSeer to find the citations the paper uses
if find_citations:
citations_list = map(lambda x : CiteSeerMiner().mine(unidecode(x['title'])), f_xml_dicts)
for i, citations in enumerate(citations_list):
if citations:
f_xml_dicts[i]['citations'] = citations
# Write the citations to the file
map (lambda x : fout.write(json.dumps(x) + '\n'), f_xml_dicts)
fout.close()
def _extract_year_urls(self, url):
"""Returns a list of year urls from the given dblp page found at url."""
parser = BS(url_read(url))
keyword = url.rsplit('/',2)[1]
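        # Descriptive note: walk the 'head' divs that carry the DBLP venue icon
        # (DBLP_YEAR_LINK_FLAG), take the href of each div's first link, and keep
        # only the hrefs containing the conference keyword parsed from the url.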
return filter (lambda url : keyword in url,
map(lambda x : x.find('a')['href'],
filter(lambda x : x.find('img', {'src': DBLP_YEAR_LINK_FLAG}) != None,
parser.findAll('div', {'class': 'head'}))))
def _extract_paper_xml_urls(self, url):
"""Returns a list of xml paper urls from the given dblp page found at url."""
parser = BS(url_read(url))
return map(lambda x : x['href'],
filter(lambda x: x.getText().find("XML") != -1,
parser.findAll('a')))
def _extract_xml_from_url(self, url):
"""Returns a list of xml paper data found at given url."""
return xmltodict.parse(url_read(url))
CITESEER_DOMAIN = "http://citeseer.ist.psu.edu"
CITESEER_SEARCH_LINK = CITESEER_DOMAIN + "/search?q=title%3A%28{title}%29&sort=cite&t=doc"
CITESEER_DOCUMENT_LINK_PARTIAL = "/viewdoc/"
class CiteSeerMiner(BibliographyMiner):
def mine(self, iden):
iden = iden.lower()
search_url = CITESEER_SEARCH_LINK.format(title = urllib.quote_plus(iden))
paper_url = self._extract_paper_url(iden, search_url)
if not paper_url:
print("citation not found for", iden, file=sys.stderr)
return None
else:
print("citation found for", iden, file=sys.stderr)
try:
citation_urls = self._extract_citation_urls(paper_url)
except:
return None
authors = map(lambda x : self._extract_citation_from_url(x), citation_urls[0:2])
return list(itertools.chain(*authors))
def _extract_paper_url(self, title, url):
try:
parser = BS(url_read(url))
link = parser.find('div', {'class':'result'}).find('h3').find('a')
search_title = unidecode(link.contents[0].strip().lower())
if search_title[:len(title)/2] == title[:len(title)/2]:
return CITESEER_DOMAIN + link['href']
else:
return None
except:
return None
def _extract_citation_urls(self, url):
parser = BS(url_read(url))
return filter(lambda x : CITESEER_DOCUMENT_LINK_PARTIAL in x,
map(lambda x : CITESEER_DOMAIN + x['href'], parser.find('div', {'id':'citations'}).findAll('a')))
def _extract_citation_from_url(self, url):
parser = BS(url_read(url))
try:
return map(lambda s : s.strip(), " ".join(unidecode(parser.find('div', {'id':'docAuthors'}).contents[0]).split())[2:].split(','))
except:
return []
# ------------------------------------------------------------------------------
# Setup command line parser
p = ArgumentParser(description="Extracts bibliographic data from DBLP.")
p.add_argument('name', help='the name of the conference to extract')
p.add_argument('-f', dest='file', help='the output file for conference data')
p.add_argument('-l', dest='limit', type=int, default=30, help='number of conference dates to mine (default 30)')
p.add_argument('-s', dest='skip', type=int, default=0, help='number of conference dates (in chronological order) to skip before mining (default 0)')
p.add_argument('-nc', '--nocite', dest='citations', action="store_false", default=True, help='disable citation mining from CiteSeer')
args = p.parse_args()
# Kick off the data mining
miner = DBLPMiner()
miner.mine(args.name, find_citations=args.citations, filename=args.file, limit=args.limit, skip=args.skip)
| {
"content_hash": "0d4e1742ff84de75ae50911ec0223f65",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 148,
"avg_line_length": 38.35858585858586,
"alnum_prop": 0.5815668202764976,
"repo_name": "rjullman/pubminer",
"id": "039e88f1666c72f493c2b76eab05f92d7ac84b01",
"size": "7595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7595"
}
],
"symlink_target": ""
} |
from south.db import db
from django.db import models
from mypage.pages.models import *
import datetime
MOVE_SQL = '''
UPDATE
`%(table)s` tbl
SET
tbl.`%(to)s` = tbl.`%(from)s`;
'''
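# Illustrative rendering (using the values passed in forwards() below):
#   MOVE_SQL % {'table': 'pages_userpage', 'from': 'user_id', 'to': 'tmp_user'}
#   produces: UPDATE `pages_userpage` tbl SET tbl.`tmp_user` = tbl.`user_id`;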
class Migration:
def forwards(self, orm):
table_name = 'pages_userpage'
db.add_column(table_name, 'tmp_user', models.IntegerField(null=True))
db.execute(MOVE_SQL % {
'table': table_name,
'from': 'user_id',
'to': 'tmp_user',
})
db.delete_column(table_name, 'user_id')
db.add_column(table_name, 'user', models.ForeignKey(orm['auth.User'], null=True))
db.execute(MOVE_SQL % {
'table': table_name,
'from': 'tmp_user',
'to': 'user_id',
})
db.alter_column(table_name, 'user_id', models.ForeignKey(orm['auth.User'], null=False))
db.delete_column(table_name, 'tmp_user')
def backwards(self, orm):
table_name = 'pages_userpage'
        print 'no way back yet'
'''
# create integer field
db.alter_column(table_name, 'user_id', models.IntegerField())
# remove _id suffix
db.rename_column(table_name, 'user_id', 'tmp_user')
# remove old constraints
db.add_column(table_name, 'user', models.ForeignKey(orm['auth.User'], null=True))
db.delete_column(table_name, 'user_id')
'''
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.widgetinpage': {
'Meta': {'unique_together': "(('page','widget',),)"},
'config_json': ('models.TextField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'page': ('models.ForeignKey', ["orm['pages.Page']"], {'verbose_name': "_('Page')"}),
'rendered_widget': ('models.ForeignKey', ["orm['widgets.RenderedWidget']"], {'null': 'False'}),
'state': ('models.SmallIntegerField', [], {'default': '2'}),
'widget': ('models.ForeignKey', ["orm['widgets.Widget']"], {'verbose_name': "_('Widget')"})
},
'auth.user': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'widgets.widget': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.page': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'layout_json': ('models.TextField', [], {}),
'site': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
'skin': ('models.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'template': ('models.CharField', [], {'default': "'page.html'", 'max_length': '100'}),
'widgets': ('models.ManyToManyField', ["orm['widgets.Widget']"], {'through': "'WidgetInPage'"})
},
'widgets.renderedwidget': {
'Meta': {'unique_together': "(('widget','state','site',),)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.userpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'user': ('models.ForeignKey', ["orm['auth.User']"], {'db_index': 'True'})
},
'pages.sessionpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'session_key': ('models.CharField', ["_('session key')"], {'max_length': '40', 'db_index': 'True'}),
'updated': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'False'})
}
}
complete_apps = ['pages']
| {
"content_hash": "3e8b9e8d9ff3eadd3c3d994e8ae12717",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 112,
"avg_line_length": 37.9622641509434,
"alnum_prop": 0.49726640159045726,
"repo_name": "ella/mypage",
"id": "2f1b8bf0f6988d990991860ab60abd0ffca7c9c1",
"size": "4024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypage/pages/migrations/0006_drop_user_unique.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "232497"
},
{
"name": "Shell",
"bytes": "3912"
}
],
"symlink_target": ""
} |
"""
Example to show usage of AutoLockRenewer asynchronously:
1. Automatically renew locks on messages received from non-sessionful entity
2. Automatically renew locks on the session of sessionful entity
3. Handle lock renewal failures via an on_lock_renew_failure callback
"""
import os
import asyncio
from azure.servicebus import ServiceBusMessage
from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
from azure.servicebus.exceptions import ServiceBusError
CONNECTION_STR = os.environ['SERVICEBUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICEBUS_QUEUE_NAME"]
SESSION_QUEUE_NAME = os.environ['SERVICEBUS_SESSION_QUEUE_NAME']
async def renew_lock_on_message_received_from_non_sessionful_entity():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
async with servicebus_client:
async with servicebus_client.get_queue_sender(queue_name=QUEUE_NAME) as sender:
msgs_to_send = [ServiceBusMessage("session message: {}".format(i)) for i in range(10)]
await sender.send_messages(msgs_to_send)
print('Send messages to non-sessionful queue.')
# Can also be called via "with AutoLockRenewer() as renewer" to automate shutdown.
renewer = AutoLockRenewer()
async with servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME, prefetch_count=10) as receiver:
received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)
for msg in received_msgs:
# automatically renew the lock on each message for 100 seconds
renewer.register(receiver, msg, max_lock_renewal_duration=100)
print('Register messages into AutoLockRenewer done.')
await asyncio.sleep(100) # message handling for long period (E.g. application logic)
for msg in received_msgs:
await receiver.complete_message(msg)
print('Complete messages.')
await renewer.close()
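# Note (illustrative, not from the original sample): the explicit close() above
# can be avoided by using the renewer as an async context manager, as the
# failure-callback sample further below does:
#     async with AutoLockRenewer() as renewer:
#         renewer.register(receiver, msg, max_lock_renewal_duration=100)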
async def renew_lock_on_session_of_the_sessionful_entity():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
async with servicebus_client:
async with servicebus_client.get_queue_sender(queue_name=SESSION_QUEUE_NAME) as sender:
msgs_to_send = [ServiceBusMessage("session message: {}".format(i), session_id='SESSION') for i in range(10)]
await sender.send_messages(msgs_to_send)
print('Send messages to sessionful queue.')
renewer = AutoLockRenewer()
async with servicebus_client.get_queue_receiver(
queue_name=SESSION_QUEUE_NAME,
session_id='SESSION',
prefetch_count=10
) as receiver:
# automatically renew the lock on the session for 100 seconds
renewer.register(receiver, receiver.session, max_lock_renewal_duration=100)
print('Register session into AutoLockRenewer.')
received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)
await asyncio.sleep(100) # message handling for long period (E.g. application logic)
for msg in received_msgs:
await receiver.complete_message(msg)
print('Complete messages.')
async def renew_lock_with_lock_renewal_failure_callback():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
async with servicebus_client:
async with servicebus_client.get_queue_sender(queue_name=QUEUE_NAME) as sender:
await sender.send_messages(ServiceBusMessage("message"))
async with AutoLockRenewer() as renewer:
            # For this sample we're going to set the renewal recurrence of the AutoLockRenewer to be greater
            # than the service-side message lock duration, to demonstrate failure. Normally, this should not be adjusted.
renewer._sleep_time = 40
async with servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME, prefetch_count=10) as receiver:
async def on_lock_renew_failure_callback(renewable, error):
# If auto-lock-renewal fails, this function will be called.
# If failure is due to an error, the second argument will be populated, otherwise
# it will default to `None`.
# This callback can be an ideal location to log the failure, or take action to safely
# handle any processing on the message or session that was in progress.
print("Intentionally failed to renew lock on {} due to {}".format(renewable, error))
received_msgs = await receiver.receive_messages(max_message_count=1, max_wait_time=5)
for msg in received_msgs:
                    # automatically renew the lock on each message for 90 seconds
renewer.register(receiver,
msg,
max_lock_renewal_duration=90,
on_lock_renew_failure=on_lock_renew_failure_callback)
print('Register messages into AutoLockRenewer done.')
# Cause the messages and autorenewal to time out.
# Other reasons for renew failure could include a network or service outage.
await asyncio.sleep(80)
try:
for msg in received_msgs:
await receiver.complete_message(msg)
except ServiceBusError as e:
print('Messages cannot be settled if they have timed out. (This is expected)')
print('Lock renew failure demonstration complete.')
asyncio.run(renew_lock_on_message_received_from_non_sessionful_entity())
asyncio.run(renew_lock_on_session_of_the_sessionful_entity())
asyncio.run(renew_lock_with_lock_renewal_failure_callback())
| {
"content_hash": "7a67eb827111da294cf899dd45ed482d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 120,
"avg_line_length": 48.203252032520325,
"alnum_prop": 0.6549165120593692,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e83c3fea5953ef576f392c30d2629b86f94c5eb4",
"size": "6298",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-servicebus/samples/async_samples/auto_lock_renew_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import max7219.led as led
import time
from max7219.font import proportional, SINCLAIR_FONT, TINY_FONT, CP437_FONT
from random import randrange
device = led.matrix(cascaded=4)
device.orientation(90)
device.scroll_up(ord('A'))
| {
"content_hash": "fdf1e9ceeee76ea0863e707b750520a7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 23.8,
"alnum_prop": 0.7521008403361344,
"repo_name": "mB-PiBox/max7219-examples",
"id": "44c7bf4e5281b9129b9c7ed5e22614ee77c8d171",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrolltest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16078"
}
],
"symlink_target": ""
} |
import logging
from django.forms import ValidationError # noqa
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class BaseUserForm(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(BaseUserForm, self).__init__(request, *args, **kwargs)
# Populate project choices
project_choices = []
# If the user is already set (update action), list only projects which
# the user has access to.
user_id = kwargs['initial'].get('id', None)
domain_id = kwargs['initial'].get('domain_id', None)
projects, has_more = api.keystone.tenant_list(request,
domain=domain_id,
user=user_id)
for project in projects:
if project.enabled:
project_choices.append((project.id, project.name))
if not project_choices:
project_choices.insert(0, ('', _("No available projects")))
elif len(project_choices) > 1:
project_choices.insert(0, ('', _("Select a project")))
self.fields['project'].choices = project_choices
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'password' in data:
if data['password'] != data.get('confirm_password', None):
raise ValidationError(_('Passwords do not match.'))
return data
ADD_PROJECT_URL = "horizon:admin:projects:create"
class CreateUserForm(BaseUserForm):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("User Name"))
email = forms.EmailField(
label=_("Email"),
required=False)
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False))
project = forms.DynamicChoiceField(label=_("Primary Project"),
add_item_link=ADD_PROJECT_URL)
role_id = forms.ChoiceField(label=_("Role"))
def __init__(self, *args, **kwargs):
roles = kwargs.pop('roles')
super(CreateUserForm, self).__init__(*args, **kwargs)
role_choices = [(role.id, role.name) for role in roles]
self.fields['role_id'].choices = role_choices
# For keystone V3, display the two fields in read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data')
def handle(self, request, data):
domain = api.keystone.get_default_domain(self.request)
try:
LOG.info('Creating user with name "%s"' % data['name'])
if "email" in data:
data['email'] = data['email'] or None
new_user = api.keystone.user_create(request,
name=data['name'],
email=data['email'],
password=data['password'],
project=data['project'],
enabled=True,
domain=domain.id)
messages.success(request,
_('User "%s" was successfully created.')
% data['name'])
if data['role_id']:
try:
api.keystone.add_tenant_user_role(request,
data['project'],
new_user.id,
data['role_id'])
except Exception:
exceptions.handle(request,
_('Unable to add user '
'to primary project.'))
return new_user
except Exception:
exceptions.handle(request, _('Unable to create user.'))
class UpdateUserForm(BaseUserForm):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
name = forms.CharField(label=_("User Name"))
email = forms.EmailField(
label=_("Email"),
required=False)
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
required=False,
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False),
required=False)
project = forms.ChoiceField(label=_("Primary Project"))
def __init__(self, request, *args, **kwargs):
super(UpdateUserForm, self).__init__(request, *args, **kwargs)
if api.keystone.keystone_can_edit_user() is False:
for field in ('name', 'email', 'password', 'confirm_password'):
self.fields.pop(field)
# For keystone V3, display the two fields in read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data', 'password')
def handle(self, request, data):
user = data.pop('id')
# Throw away the password confirmation, we're done with it.
data.pop('confirm_password', None)
data.pop('domain_id')
data.pop('domain_name')
try:
if "email" in data:
data['email'] = data['email'] or None
response = api.keystone.user_update(request, user, **data)
messages.success(request,
_('User has been updated successfully.'))
except Exception:
response = exceptions.handle(request, ignore=True)
messages.error(request, _('Unable to update the user.'))
if isinstance(response, http.HttpResponse):
return response
else:
return True
| {
"content_hash": "fa21842a91904bb8e5e205d04d98872e",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 78,
"avg_line_length": 42.43010752688172,
"alnum_prop": 0.5496705524581855,
"repo_name": "rd37/horizon",
"id": "89a2b7cfac076b9188665dd0c6096638644d375f",
"size": "8701",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/users/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "334034"
},
{
"name": "JavaScript",
"bytes": "707335"
},
{
"name": "Python",
"bytes": "3254186"
},
{
"name": "Shell",
"bytes": "15924"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.model_selection import GroupKFold, cross_validate
from sklearn.pipeline import make_pipeline
N, M = 100, 4
X = np.random.rand(N, M)
y = np.random.randint(0, 2, size=N)
my_groups = np.random.randint(0, 10, size=N)
my_weights = np.random.rand(N)
my_other_weights = np.random.rand(N)
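# Illustrative sketch (assumed usage, not part of the original file): routing the
# group labels defined above into a grouped cross-validation run.
# scores = cross_validate(
#     make_pipeline(SelectKBest(k=2), LogisticRegressionCV()),
#     X, y,
#     cv=GroupKFold(n_splits=3), groups=my_groups,
#     scoring=make_scorer(accuracy_score))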
| {
"content_hash": "9ef4b61c51cce92868f5d60618f06a80",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 35.785714285714285,
"alnum_prop": 0.7844311377245509,
"repo_name": "scikit-learn/enhancement_proposals",
"id": "26c1d6a1bad2dc0d1cb49b1694273513498580b1",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slep006/defs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "835"
},
{
"name": "Makefile",
"bytes": "629"
},
{
"name": "Python",
"bytes": "28761"
}
],
"symlink_target": ""
} |
'''
Nereid User
user.py
'''
from trytond.pool import Pool, PoolMeta
from nereid import route, request, abort
from werkzeug.contrib.atom import AtomFeed
__all__ = ['NereidUser']
__metaclass__ = PoolMeta
class NereidUser:
__name__ = 'nereid.user'
def serialize(self, purpose=None):
"""
Downstream implementation of serialize() which adds serialization for
atom feeds.
"""
if purpose == 'atom':
return {
'name': self.display_name,
'email': self.email or None,
}
elif hasattr(super(NereidUser, self), 'serialize'):
return super(NereidUser, self).serialize(purpose=purpose)
@classmethod
@route('/article-author/<int:id>.atom')
def atom_feed(cls, id):
"""
Returns the atom feed for all articles published under a certain author
"""
Article = Pool().get('nereid.cms.article')
try:
articles = Article.search([
('author', '=', id),
('state', '=', 'published'),
])
except:
abort(404)
feed = AtomFeed(
"Articles by Author %s" % cls(id).display_name,
feed_url=request.url, url=request.host_url
)
for article in articles:
feed.add(**article.serialize(purpose='atom'))
return feed.get_response()
| {
"content_hash": "9367ffac9efd63eac4d62ae0e0f63a2a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 25.392857142857142,
"alnum_prop": 0.5464135021097046,
"repo_name": "fulfilio/nereid-cms",
"id": "07ccf09d8d3d83efc016709af47ec4ebb35a9fc6",
"size": "1446",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67881"
}
],
"symlink_target": ""
} |
import os, send2trash
def deleteunuse(folder):
    """Send every file of 10 MB or more under folder to the trash."""
    folder = os.path.abspath(folder)
    for foldername, subfolders, filenames in os.walk(folder):
        for filename in filenames:
            # foldername is already an absolute path, so joining folder again is unnecessary
            filepath = os.path.join(foldername, filename)
            filesize = os.path.getsize(filepath)
            if filesize >= 10000000:
                send2trash.send2trash(filepath)
deleteunuse('F:\\python')
| {
"content_hash": "bf95b2dd68383fb34f072ef3f2576557",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 28.5,
"alnum_prop": 0.6365914786967418,
"repo_name": "sallyyoo/ced2",
"id": "ae94f89be2c03e9bdbdd4315ab98f6263b3cdb74",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/practice/9/deleteunuse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12"
},
{
"name": "CSS",
"bytes": "68019"
},
{
"name": "DIGITAL Command Language",
"bytes": "583"
},
{
"name": "HTML",
"bytes": "815205"
},
{
"name": "JavaScript",
"bytes": "3816151"
},
{
"name": "PHP",
"bytes": "6048"
},
{
"name": "Python",
"bytes": "196834"
},
{
"name": "Ruby",
"bytes": "147"
},
{
"name": "Vue",
"bytes": "107411"
}
],
"symlink_target": ""
} |
"""Unit test cases for the COT.add_disk.COTAddDisk class."""
import filecmp
import os.path
import re
import mock
from COT.commands.tests.command_testcase import CommandTestCase
from COT.commands.add_disk import COTAddDisk
from COT.data_validation import InvalidInputError, ValueMismatchError
from COT.data_validation import ValueUnsupportedError, ValueTooHighError
from COT.disks import DiskRepresentation
from COT.disks.qcow2 import QCOW2
# pylint: disable=missing-param-doc,missing-type-doc
class TestCOTAddDisk(CommandTestCase):
"""Test cases for the COTAddDisk module."""
command_class = COTAddDisk
def test_readiness(self):
"""Test ready_to_run() under various combinations of parameters."""
self.command.package = self.input_ovf
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.assertTrue(re.search("DISK_IMAGE is a mandatory", reason))
self.assertRaises(InvalidInputError, self.command.run)
self.command.disk_image = self.blank_vmdk
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
self.command.address = "1:0"
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.assertTrue(re.search("controller", reason))
self.assertRaises(InvalidInputError, self.command.run)
self.command.controller = "ide"
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
# address without controller is not allowed,
# but controller without address is OK
self.command.address = None
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
def test_conflicting_args_1(self):
"""Test conflicting arguments are detected and rejected."""
# TODO - it would be nice to detect this in ready_to_run()
# rather than run()
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
# file2 exists and is mapped to IDE 1:0 but we request IDE 1:1
self.command.controller = "ide"
self.command.address = "1:1"
self.command.file_id = "file2"
self.assertRaises(ValueMismatchError, self.command.run)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
def test_conflicting_args_2(self):
"""Test conflicting arguments are detected and rejected."""
# TODO - it would be nice to detect this in ready_to_run()
# rather than run()
self.command.package = self.input_ovf
self.command.disk_image = self.input_iso
# ovf contains input.iso but we're asking it to overwrite input.vmdk
self.command.file_id = "vmdisk1"
self.assertRaises(ValueMismatchError, self.command.run)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_CDROM)
def test_conflicting_args_3(self):
"""Test conflicting arguments are detected and rejected."""
# TODO - it would be nice to detect this in ready_to_run()
# rather than run()
self.command.package = self.input_ovf
self.command.disk_image = self.input_vmdk
# ovf contains input.vmdk but we're asking it to overwrite input.iso
self.command.controller = "ide"
self.command.address = "1:0"
self.assertRaises(ValueMismatchError, self.command.run)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
def test_new_hard_disk(self):
"""Test adding a new hard disk to the OVF."""
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.command.finished()
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:Parent>5</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""".format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
blank_size=self.FILE_SIZE['blank.vmdk']))
# Make sure the disk file is copied over
self.assertTrue(filecmp.cmp(self.blank_vmdk,
os.path.join(self.temp_dir, "blank.vmdk")),
"disk file should be exported unchanged")
def test_new_hard_disk_relative_path(self):
"""Test adding a new hard disk with relative path to the OVF."""
os.chdir(os.path.dirname(self.blank_vmdk))
self.command.package = os.path.relpath(self.input_ovf)
self.command.disk_image = os.path.basename(self.blank_vmdk)
self.command.run()
        # COT should fix up the relative path to an absolute path, avoiding this:
# self.assertLogged(**self.FILE_REF_RELATIVE)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.command.finished()
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:Parent>5</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""".format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
blank_size=self.FILE_SIZE['blank.vmdk']))
# Make sure the disk file is copied over
self.assertTrue(filecmp.cmp(self.blank_vmdk,
os.path.join(self.temp_dir, "blank.vmdk")),
"disk file should be exported unchanged")
def test_new_hard_disk_and_explicit_controller(self):
"""Test adding a hard disk to an explicitly new SCSI controller."""
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
self.command.controller = "scsi"
self.command.address = "1:0"
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.command.finished()
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:Address>1</rasd:Address>
+ <rasd:Description>SCSI Controller 1</rasd:Description>
+ <rasd:ElementName>SCSI Controller</rasd:ElementName>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>15</rasd:InstanceID>
+ <rasd:Parent>14</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""".format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
blank_size=self.FILE_SIZE['blank.vmdk']))
# Make sure the disk file is copied over
self.assertTrue(filecmp.cmp(self.blank_vmdk,
os.path.join(self.temp_dir, "blank.vmdk")),
"disk file should be exported unchanged")
def test_new_hard_disk_and_automatic_controller(self):
"""Add a new hard disk and create an IDE controller automatically."""
# Since the primary IDE0 controller is already full in the IOSv OVF,
# COT will need to automatically create IDE1 controller
self.command.package = self.iosv_ovf
self.command.disk_image = self.blank_vmdk
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.assertLogged(**self.ADDRESS_ON_PARENT_NOT_SPECIFIED)
self.command.finished()
self.check_diff(file1=self.iosv_ovf,
expected="""
<ovf:File ovf:href="input.vmdk" ovf:id="vios-adventerprisek9-m.vmdk" \
ovf:size="{input_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1073741824" ovf:capacityAllocationUnits="byte" \
ovf:diskId="vios-adventerprisek9-m.vmdk" \
ovf:fileRef="vios-adventerprisek9-m.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:Address>1</rasd:Address>
+ <rasd:Description>IDE Controller 1</rasd:Description>
+ <rasd:ElementName>IDE Controller</rasd:ElementName>
+ <rasd:InstanceID>6</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:Parent>6</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item ovf:required="false">"""
.format(input_size=self.FILE_SIZE['input.vmdk'],
blank_size=self.FILE_SIZE['blank.vmdk']))
def test_new_hard_disk_v09(self):
"""Test adding a disk to a version 0.9 OVF."""
self.command.package = self.v09_ovf
self.command.disk_image = self.blank_vmdk
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.command.finished()
# Default controller for generic platform is IDE for hard disks
self.check_diff(file1=self.v09_ovf,
expected="""
<ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{input_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1073741824" ovf:diskId="vmdisk1" \
ovf:fileRef="file1" ovf:format="http://www.vmware.com/specifications/\
vmdk.html#sparse" />
+ <ovf:Disk ovf:capacity="536870912" ovf:diskId="blank.vmdk" \
ovf:fileRef="blank.vmdk" ovf:format="http://www.vmware.com/interfaces/\
specifications/vmdk.html#streamOptimized" />
</ovf:Section>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:Caption>Hard Disk Drive</rasd:Caption>
+ <rasd:InstanceId>9</rasd:InstanceId>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/blank.vmdk</rasd:HostResource>
+ <rasd:Parent>5</rasd:Parent>
+ <rasd:AddressOnParent>1</rasd:AddressOnParent>
+ </ovf:Item>
</ovf:Section>""".format(input_size=self.FILE_SIZE['input.vmdk'],
blank_size=self.FILE_SIZE['blank.vmdk']))
def test_new_hard_disk_v20_vbox(self):
"""Test adding a new hard disk to a v2.0 OVF from VirtualBox."""
self.command.package = self.v20_vbox_ovf
self.command.disk_image = self.blank_vmdk
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.command.finished()
# TODO - vbox XML is not very clean so the diffs are large...
# self.check_diff('', file1=self.v20_vbox_ovf)
# ovftool does not consider vbox ovfs to be valid
self.validate_output_with_ovftool = False
def test_overwrite_hard_disk_fileid(self):
"""Overwrite an existing disk by specifying matching file-id."""
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
self.command.file_id = 'file1'
# For coverage's sake, let's change the controller subtype too
self.command.subtype = "virtio"
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
self.check_diff("""
<ovf:References>
- <ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{input_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="file1" ovf:size="{blank_size}" />
<ovf:File ovf:href="input.iso" ovf:id="file2" ovf:size="{iso_size}" />
...
<ovf:Info>Virtual disk information</ovf:Info>
- <ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
<rasd:InstanceID>3</rasd:InstanceID>
- <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
<rasd:ResourceType>6</rasd:ResourceType>
""".format(input_size=self.FILE_SIZE['input.vmdk'],
blank_size=self.FILE_SIZE['blank.vmdk'],
iso_size=self.FILE_SIZE['input.iso']))
# Make sure the old disk is not copied
self.assertFalse(os.path.exists(os.path.join(self.temp_dir,
"input.vmdk")),
"old disk should be replaced, not exported")
# Make sure the new disk is copied
self.assertTrue(filecmp.cmp(self.blank_vmdk,
os.path.join(self.temp_dir, "blank.vmdk")),
"newly added disk should be exported unchanged")
def test_overwrite_hard_disk_address(self):
"""Overwrite an existing disk by matching controller address."""
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
self.command.controller = 'scsi'
self.command.address = "0:0"
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
self.check_diff("""
<ovf:References>
- <ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{input_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="file1" ovf:size="{blank_size}" />
<ovf:File ovf:href="input.iso" ovf:id="file2" ovf:size="{iso_size}" />
...
<ovf:Info>Virtual disk information</ovf:Info>
- <ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
""".format(input_size=self.FILE_SIZE['input.vmdk'],
blank_size=self.FILE_SIZE['blank.vmdk'],
iso_size=self.FILE_SIZE['input.iso']))
# Make sure the old disk is not copied
self.assertFalse(os.path.exists(os.path.join(self.temp_dir,
"input.vmdk")),
"old disk should be replaced, not exported")
# Make sure the new disk is copied
self.assertTrue(filecmp.cmp(self.blank_vmdk,
os.path.join(self.temp_dir, "blank.vmdk")),
"new disk should be exported unchanged")
def test_overwrite_harddisk_with_cdrom(self):
"""Replace a hard disk with a cd-rom."""
self.command.package = self.v09_ovf
self.command.disk_image = self.input_iso
self.command.drive_type = 'cdrom'
self.command.controller = 'scsi'
self.command.address = "0:0"
self.command.run()
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK) # TODO can we block this?
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.assertLogged(**self.DELETING_DISK)
self.assertLogged(**self.DELETING_DISK_SECTION)
self.command.finished()
self.check_diff(file1=self.v09_ovf, expected="""
<ovf:References>
- <ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{vmdk_size}" />
+ <ovf:File ovf:href="input.iso" ovf:id="file1" ovf:size="{iso_size}" />
</ovf:References>
- <ovf:Section xsi:type="ovf:DiskSection_Type">
- <ovf:Info>Meta-information about the virtual disks</ovf:Info>
- <ovf:Disk ovf:capacity="1073741824" ovf:diskId="vmdisk1" \
ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/specifications/vmdk.html#sparse" />
- </ovf:Section>
<ovf:Section xsi:type="ovf:NetworkSection_Type">
...
<rasd:InstanceId>7</rasd:InstanceId>
- <rasd:ResourceType>17</rasd:ResourceType>
- <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:ResourceType>15</rasd:ResourceType>
+ <rasd:HostResource>/file/file1</rasd:HostResource>
<rasd:Parent>4</rasd:Parent>
""".format(vmdk_size=self.FILE_SIZE['input.vmdk'],
iso_size=self.FILE_SIZE['input.iso']))
# Make sure the old disk is not copied
self.assertFalse(os.path.exists(os.path.join(self.temp_dir,
"input.vmdk")),
"old disk should be replaced, not exported")
# Make sure the new disk is copied
self.assertTrue(filecmp.cmp(self.input_iso,
os.path.join(self.temp_dir, "input.iso")),
"new disk should be exported unchanged")
def test_overwrite_cdrom_with_harddisk(self):
"""Replace a cd-rom with a hard disk."""
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
self.command.drive_type = 'harddisk'
self.command.controller = 'ide'
self.command.address = "1:0"
self.command.run()
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
self.check_diff("""
<ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{vmdk_size}" />
- <ovf:File ovf:href="input.iso" ovf:id="file2" ovf:size="{iso_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="file2" ovf:size="{blank_size}" />
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
...
<ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="file2" ovf:fileRef="file2" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
<rasd:ElementName>CD-ROM 1</rasd:ElementName>
- <rasd:HostResource>ovf:/file/file2</rasd:HostResource>
+ <rasd:HostResource>ovf:/disk/file2</rasd:HostResource>
<rasd:InstanceID>7</rasd:InstanceID>
<rasd:Parent>4</rasd:Parent>
- <rasd:ResourceType>15</rasd:ResourceType>
+ <rasd:ResourceType>17</rasd:ResourceType>
</ovf:Item>
""".format(vmdk_size=self.FILE_SIZE['input.vmdk'],
iso_size=self.FILE_SIZE['input.iso'],
blank_size=self.FILE_SIZE['blank.vmdk'],
cfg_size=self.FILE_SIZE['sample_cfg.txt']))
# Make sure the old disk is not copied
self.assertFalse(os.path.exists(os.path.join(self.temp_dir,
"input.iso")),
"old disk should be replaced, not exported")
# Make sure the new disk is copied
self.assertTrue(filecmp.cmp(self.blank_vmdk,
os.path.join(self.temp_dir, "blank.vmdk")),
"new disk should be exported unchanged")
def test_disk_conversion(self):
"""Make sure hard disk is converted to stream-optimized VMDK format."""
# Create a qcow2 image and add it as a new disk
new_qcow2 = os.path.join(self.temp_dir, "new.qcow2")
# Make it a small file to keep the test fast
QCOW2.create_file(path=new_qcow2, capacity="16M")
self.command.package = self.input_ovf
self.command.disk_image = new_qcow2
self.command.controller = 'scsi'
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.command.finished()
# Make sure the disk was converted and added to the OVF
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="new.vmdk" ovf:id="new.vmdk" ovf:size="{new_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="16" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="new.vmdk" ovf:fileRef="new.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>1</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/new.vmdk</rasd:HostResource>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""".format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
new_size=os.path.getsize(os.path.join(self.temp_dir, "new.vmdk"))))
# Make sure the disk was actually converted to the right format
diskrep = DiskRepresentation.from_file(os.path.join(self.temp_dir,
"new.vmdk"))
self.assertEqual(diskrep.disk_format, 'vmdk')
self.assertEqual(diskrep.disk_subformat, "streamOptimized")
def test_disk_conversion_and_replacement(self):
"""Convert a disk to implicitly replace an existing disk."""
# Create a qcow2 image and add it as replacement for the existing vmdk
new_qcow2 = os.path.join(self.temp_dir, "input.qcow2")
# Keep it small!
QCOW2.create_file(path=new_qcow2, capacity="16M")
self.command.package = self.input_ovf
self.command.disk_image = new_qcow2
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
# Make sure the disk was converted and replaced the existing disk
self.check_diff("""
<ovf:References>
- <ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{input_size}" />
+ <ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{new_size}" />
<ovf:File ovf:href="input.iso" ovf:id="file2" ovf:size="{iso_size}" />
...
<ovf:Info>Virtual disk information</ovf:Info>
- <ovf:Disk ovf:capacity="1" ovf:capacityAllocationUnits="byte * 2^30" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="16" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
</ovf:DiskSection>
""".format(input_size=self.FILE_SIZE['input.vmdk'],
iso_size=self.FILE_SIZE['input.iso'],
new_size=os.path.getsize(os.path.join(self.temp_dir,
"input.vmdk"))))
def test_add_disk_no_existing(self):
"""Add a disk to an OVF that doesn't currently have any.
Verify correct creation of various OVF sub-sections.
"""
self.command.package = self.minimal_ovf
self.command.disk_image = self.blank_vmdk
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.assertLogged(**self.ADDRESS_ON_PARENT_NOT_SPECIFIED)
self.command.finished()
self.check_diff(file1=self.minimal_ovf,
expected="""
<?xml version='1.0' encoding='utf-8'?>
-<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
- <ovf:References />
+<ovf:Envelope xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" \
xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/\
CIM_ResourceAllocationSettingData">
+ <ovf:References>
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
+ </ovf:References>
+ <ovf:DiskSection>
+ <ovf:Info>Virtual disk information</ovf:Info>
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ </ovf:DiskSection>
<ovf:VirtualSystem ovf:id="x">
...
<ovf:Info />
+ <ovf:Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>IDE Controller 0</rasd:Description>
+ <rasd:ElementName>IDE Controller</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:Parent>1</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""".format(blank_size=self.FILE_SIZE['blank.vmdk']))
@mock.patch("COT.commands.command.available_bytes_at_path")
def test_add_disk_insufficient_output_space(self, mock_available):
"""Make sure disk space check is invoked when adding a disk."""
self.command.package = self.minimal_ovf
self.command.ui.default_confirm_response = False
# Enough space for the OVF itself, but not for OVF + added disk
mock_available.return_value = 3 * self.FILE_SIZE['minimal.ovf']
self.command.disk_image = self.blank_vmdk
ready, reason = self.command.ready_to_run()
mock_available.assert_called_once()
self.assertFalse(ready)
self.assertRegex(reason, "Insufficient disk space available")
def test_add_cdrom_to_existing_controller(self):
"""Add a CDROM drive to an existing controller."""
self.command.package = self.input_ovf
self.command.disk_image = self.blank_vmdk
self.command.drive_type = "cdrom"
self.command.controller = "scsi"
self.command.address = "0:1"
self.command.run()
self.command.finished()
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>1</rasd:AddressOnParent>
+ <rasd:ElementName>CD-ROM Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>14</rasd:InstanceID>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:ResourceType>15</rasd:ResourceType>
+ </ovf:Item>
</ovf:VirtualHardwareSection>
""".format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
blank_size=self.FILE_SIZE['blank.vmdk']))
def test_add_disk_no_room(self):
"""Negative test - add a disk to an OVF whose controllers are full."""
# iosv.ovf already has two disks. Add a third disk...
self.command.package = self.iosv_ovf
self.command.disk_image = self.blank_vmdk
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.assertLogged(**self.ADDRESS_ON_PARENT_NOT_SPECIFIED)
self.command.finished()
self.check_diff(file1=self.iosv_ovf, expected="""
<ovf:File ovf:href="input.vmdk" ovf:id="vios-adventerprisek9-m.vmdk" \
ovf:size="152576" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1073741824" ovf:capacityAllocationUnits="byte" \
ovf:diskId="vios-adventerprisek9-m.vmdk" \
ovf:fileRef="vios-adventerprisek9-m.vmdk" \
ovf:format="http://www.vmware.com/interfaces/specifications/\
vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" \
ovf:format="http://www.vmware.com/interfaces/specifications/\
vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:Address>1</rasd:Address>
+ <rasd:Description>IDE Controller 1</rasd:Description>
+ <rasd:ElementName>IDE Controller</rasd:ElementName>
+ <rasd:InstanceID>6</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:Parent>6</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item ovf:required="false">
""".format(blank_size=self.FILE_SIZE['blank.vmdk']))
# Add a fourth disk...
self.command.package = self.temp_file
self.command.disk_image = self.input_iso
self.command.run()
self.assertLogged(**self.DRIVE_TYPE_GUESSED_CDROM)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.command.finished()
self.check_diff(file1=self.iosv_ovf, expected="""
<ovf:File ovf:href="input.vmdk" ovf:id="vios-adventerprisek9-m.vmdk" \
ovf:size="152576" />
+ <ovf:File ovf:href="blank.vmdk" ovf:id="blank.vmdk" \
ovf:size="{blank_size}" />
+ <ovf:File ovf:href="input.iso" ovf:id="input.iso" ovf:size="{iso_size}" />
</ovf:References>
...
<ovf:Disk ovf:capacity="1073741824" ovf:capacityAllocationUnits="byte" \
ovf:diskId="vios-adventerprisek9-m.vmdk" \
ovf:fileRef="vios-adventerprisek9-m.vmdk" ovf:format="http://www.vmware.com/\
interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="512" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="blank.vmdk" ovf:fileRef="blank.vmdk" \
ovf:format="http://www.vmware.com/interfaces/specifications/\
vmdk.html#streamOptimized" />
</ovf:DiskSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:Address>1</rasd:Address>
+ <rasd:Description>IDE Controller 1</rasd:Description>
+ <rasd:ElementName>IDE Controller</rasd:ElementName>
+ <rasd:InstanceID>6</rasd:InstanceID>
+ <rasd:ResourceSubType>virtio</rasd:ResourceSubType>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/blank.vmdk</rasd:HostResource>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:Parent>6</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>1</rasd:AddressOnParent>
+ <rasd:ElementName>CD-ROM Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/input.iso</rasd:HostResource>
+ <rasd:InstanceID>8</rasd:InstanceID>
+ <rasd:Parent>6</rasd:Parent>
+ <rasd:ResourceType>15</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item ovf:required="false">
""".format(blank_size=self.FILE_SIZE['blank.vmdk'],
iso_size=self.FILE_SIZE['input.iso']))
# Create a qcow2 image
new_qcow2 = os.path.join(self.temp_dir, "foozle.qcow2")
# Keep it small!
QCOW2.create_file(path=new_qcow2, capacity="16M")
# Try to add a fifth disk - IDE controllers are full!
self.command.package = self.temp_file
self.command.disk_image = new_qcow2
self.assertRaises(ValueTooHighError, self.command.run)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
def test_overwrite_implicit_file_id(self):
"""file_id defaults to filename if not set."""
self.command.package = self.invalid_ovf
self.command.disk_image = self.input_vmdk
self.command.run()
self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
self.assertLogged(**self.NONEXISTENT_FILE)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(**self.CONTROLLER_TYPE_GUESSED_IDE)
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK)
self.command.finished()
self.assertLogged(**self.invalid_hardware_warning(
"howlongofaprofilenamecanweusehere", "0", "MiB of RAM"))
self.assertLogged(msg="Removing unused network")
self.check_diff(file1=self.invalid_ovf, expected="""
<ovf:References>
- <ovf:File ovf:href="this_is_a_really_long_filename_for_a_disk.vmdk" \
ovf:id="input.vmdk" ovf:size="{input_size}" />
+ <ovf:File ovf:href="input.vmdk" ovf:id="input.vmdk" \
ovf:size="{input_size}" />
<ovf:File ovf:href="input.iso" ovf:id="input.iso" ovf:size="360448" />
...
</ovf:Network>
- <ovf:Network ovf:name="name-but-no-description" />
</ovf:NetworkSection>
...
</ovf:Item>
+ <ovf:Item>
+ <rasd:AddressOnParent>1</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk Drive</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/input.vmdk</rasd:HostResource>
+ <rasd:InstanceID>6</rasd:InstanceID>
+ <rasd:Parent>1</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </ovf:Item>
<ovf:Item ovf:configuration="myprofile">
""".format(input_size=self.FILE_SIZE['input.vmdk']))
# ovftool will fail because invalid_ovf has an invalid Disk fileRef
self.validate_output_with_ovftool = False
def test_overwrite_disk_with_bad_host_resource(self):
"""Negative test - invalid HostResource value in OVF."""
self.command.package = self.invalid_ovf
self.command.disk_image = self.blank_vmdk
self.command.controller = "ide"
self.command.address = "0:0"
with self.assertRaises(ValueUnsupportedError) as catcher:
self.command.run()
self.assertTrue(re.search("HostResource", str(catcher.exception)))
self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
self.assertLogged(**self.NONEXISTENT_FILE)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
self.assertLogged(levelname='ERROR',
msg="Unrecognized HostResource format")
def test_overwrite_disk_with_bad_parent_by_file(self):
"""Negative test - invalid parent for disk, identified by filename."""
self.command.package = self.invalid_ovf
self.command.disk_image = self.input_iso
self.assertRaises(LookupError, self.command.run)
self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
self.assertLogged(**self.NONEXISTENT_FILE)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_CDROM)
def test_overwrite_disk_with_bad_parent_by_fileid(self):
"""Negative test - invalid parent for disk, identified by fileid."""
self.command.package = self.invalid_ovf
self.command.disk_image = self.blank_vmdk
self.command.file_id = "input.iso"
self.assertRaises(LookupError, self.command.run)
self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
self.assertLogged(**self.NONEXISTENT_FILE)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
def test_overwrite_disk_with_bad_fileref(self):
"""Negative test - invalid fileref in OVF."""
self.command.package = self.invalid_ovf
self.command.disk_image = self.blank_vmdk
self.command.file_id = "flash2"
self.assertRaises(LookupError, self.command.run)
self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
self.assertLogged(**self.NONEXISTENT_FILE)
self.assertLogged(**self.DRIVE_TYPE_GUESSED_HARDDISK)
| {
"content_hash": "90aa3a6ffffe56ef834daca6e4f8aa52",
"timestamp": "",
"source": "github",
"line_count": 859,
"max_line_length": 79,
"avg_line_length": 46.67986030267753,
"alnum_prop": 0.6451693351289342,
"repo_name": "glennmatthews/cot",
"id": "a5798c110726d5c3570a7af10307e4e1b21bfde8",
"size": "40826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "COT/commands/tests/test_add_disk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1293158"
},
{
"name": "Roff",
"bytes": "37442"
},
{
"name": "Shell",
"bytes": "3840"
}
],
"symlink_target": ""
} |
'''
This program demonstrates a problem I've encountered with the PySide
QListWidget class, using PySide 1.2.1, Qt 4.8.5, and Mac OS X 10.8.5.
To see the problem:
1. Run the program.
2. Scroll down the list.
3. Click on a list item to select it.
4. Press the "Delete" button.
When the item is deleted, the list scrolls to the top. I believe that it
should not scroll to the top.
The problem appears to have something to do with setting the selection
mode of the list to QAbstractItemView.ContiguousSelection. If one
instead sets the selection mode to QAbstractItemView.SingleSelection,
the problem does not occur.
Harold Mills
11 February 2014
'''
import sys
from PySide.QtGui import (
QAbstractItemView, QApplication, QListWidget, QMainWindow,
QPushButton, QVBoxLayout, QWidget)
class TestWindow(QMainWindow):
def __init__(self):
super(TestWindow, self).__init__()
self.setWindowTitle('PySide QListWidget Problem')
self._list = QListWidget(self)
self._list.insertItems(0, [str(i + 1) for i in xrange(50)])
# This line seems to be the problem. Change "Contiguous" to "Single"
# and item deletion does not cause the list to scroll to the top.
self._list.setSelectionMode(QAbstractItemView.ContiguousSelection)
button = QPushButton('Delete', self)
button.clicked.connect(self._on_button_clicked)
box = QVBoxLayout()
box.addWidget(self._list)
box.addWidget(button)
widget = QWidget()
widget.setLayout(box)
self.setCentralWidget(widget)
def _on_button_clicked(self):
l = self._list
for item in l.selectedItems():
l.takeItem(l.row(item))
def _main():
app = QApplication(sys.argv)
window = TestWindow()
window.show()
window.raise_()
app.exec_()
sys.exit()
if __name__ == '__main__':
_main()
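# A minimal sketch of a possible workaround (not part of the bug report above,
# and only an assumption about the underlying cause): save and restore the
# vertical scroll position around the deletion so the list does not jump back
# to the top.
#
#     def _on_button_clicked(self):
#         l = self._list
#         scroll_pos = l.verticalScrollBar().value()
#         for item in l.selectedItems():
#             l.takeItem(l.row(item))
#         l.verticalScrollBar().setValue(scroll_pos)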
| {
"content_hash": "ff42455a5c8abbe99761f646c5130550",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 24.85185185185185,
"alnum_prop": 0.6348733233979136,
"repo_name": "HaroldMills/Maka",
"id": "3d2e1fa349e66922937818b7b91a6fa03e2c49de",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/TestListWidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222632"
}
],
"symlink_target": ""
} |
import logging
from oslo.config import cfg
from jumpgate.common import hooks
cfg.CONF.register_opts([cfg.StrOpt('admin_token',
secret=True,
default='ADMIN')],
group='DEFAULT')
LOG = logging.getLogger(__name__)
@hooks.request_hook(True)
def admin_token(req, resp, kwargs):
auth_token = req.headers.get('X-AUTH-TOKEN', None)
admin_token = cfg.CONF['DEFAULT']['admin_token']
if (admin_token is not None and auth_token is not None
and admin_token == auth_token):
# admin_token authenticates to Jumpgate API, but does not
# provide SLAPI access
req.env['is_admin'] = True
LOG.debug("Admin access permitted")
| {
"content_hash": "34e131c3cf5e5ed03629ae58914d4003",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 30.36,
"alnum_prop": 0.5915678524374176,
"repo_name": "wpf710/app-proxy",
"id": "b21051a097b6219b98248d0ca0f747d5fccca1af",
"size": "759",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "jumpgate/common/hooks/admin_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "197844"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
} |
import inspect
import importlib
from pickle import dumps, loads
class Job(object):
@classmethod
def create(cls, func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
assert isinstance(args, tuple), "%r is not a valid args list" % (args, )
assert isinstance(kwargs, dict), "%r is not a valid kwargs dict" % (kwargs, )
job = cls()
if inspect.ismethod(func):
job._instance = func.im_self
job._func_name = func.__name__
elif inspect.isfunction(func):
job._func_name = "%s.%s" % (func.__module__, func.__name__)
job._args = args
job._kwargs = kwargs
job.save()
return job
@classmethod
def fetch(cls, data):
func_name, instance, args, kwargs = loads(str(data))
job = cls()
job._func_name = func_name
job._args = args
job._kwargs = kwargs
job._instance = instance
job.save()
return job
def __init__(self):
self._func_name = None
self._instance = None
self._args = None
self._kwargs = None
self._data = None
def save(self):
self._data = dumps((self._func_name, self._instance, self._args, self._kwargs))
def perform(self):
if self._instance:
return getattr(self._instance, self._func_name)(*self._args, **self._kwargs)
module_name, func_name = self._func_name.rsplit('.', 1)
module = importlib.import_module(module_name)
func = getattr(module, func_name)
return func(*self._args, **self._kwargs)
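if __name__ == '__main__':
    # Minimal usage sketch (assumed caller code, not part of the library):
    # the pickled payload in job._data is what a queue would carry, and
    # fetch()/perform() rebuild and run the callable on the consumer side.
    def add(x, y):
        return x + y
    job = Job.create(add, args=(1, 2))
    restored = Job.fetch(job._data)
    assert restored.perform() == 3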
| {
"content_hash": "7d21d1cbf08aac9a69e4d2aa5d336812",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 88,
"avg_line_length": 30.381818181818183,
"alnum_prop": 0.5529622980251346,
"repo_name": "huhuchen/asyncqueue",
"id": "1ff634dea9ffabdad0228b234389343cde8c2817",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asyncqueue/job.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5776"
}
],
"symlink_target": ""
} |
import re
import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.deprecation import RemovedInDjango21Warning
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
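# Illustrative lookups (results follow directly from the mapping above):
#
#     >>> lookup = FlexibleFieldLookupDict()
#     >>> lookup['integer']
#     'IntegerField'
#     >>> lookup['varchar(30)']      # size parsed out of the type name
#     ('CharField', {'max_length': 30})
#     >>> lookup['geometry']         # unknown type
#     Traceback (most recent call last):
#         ...
#     KeyError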
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [
FieldInfo(
info['name'],
info['type'],
None,
info['size'],
None,
None,
info['null_ok'],
info['default'],
) for info in self._table_info(cursor, table_name)
]
def column_name_converter(self, name):
"""
SQLite will in some cases, e.g. when returning columns from views and
subselects, return column names in 'alias."column"' format instead of
simply 'column'.
Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
"""
# TODO: remove when SQLite < 3.7.15 is sufficiently old.
# 3.7.13 ships in Debian stable as of 2014-03-21.
if self.connection.Database.sqlite_version_info < (3, 7, 15):
return name.split('.')[-1].strip('"')
else:
return name
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
try:
results = cursor.fetchone()[0].strip()
except TypeError:
            # It might be a view, in which case no results will be returned
return relations
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_indexes(self, cursor, table_name):
warnings.warn(
"get_indexes() is deprecated in favor of get_constraints().",
RemovedInDjango21Warning, stacklevel=2
)
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, default_value, pk
return [{
'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'default': field[4],
'pk': field[5], # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
            # Sqlite3 3.8.9+ has 5 columns, whereas older versions only give 3
            # columns. Discard the last 2 columns if present.
number, index, unique = row[:3]
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = 'btree'
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
orders = []
# There would be only 1 row to loop over
for sql, in cursor.fetchall():
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
return constraints
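# Illustrative return shape of get_constraints() (the table and index names
# are assumptions, not taken from Django itself):
#
#     {
#         'myapp_book_title_idx': {
#             'columns': ['title'], 'primary_key': False, 'unique': False,
#             'foreign_key': False, 'check': False, 'index': True,
#             'type': 'btree', 'orders': ['ASC'],
#         },
#         '__primary__': {
#             'columns': ['id'], 'primary_key': True, 'unique': False,
#             'foreign_key': False, 'check': False, 'index': False,
#         },
#     }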
| {
"content_hash": "c72948d5eb1e1465b96d07ca4081a9ec",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 112,
"avg_line_length": 42.103092783505154,
"alnum_prop": 0.5535422788116225,
"repo_name": "mattseymour/django",
"id": "730793879d85b2d164eb0d6c077423d9c97323fa",
"size": "12252",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/db/backends/sqlite3/introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182963"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11845544"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django
from django.conf import settings
from django.utils.encoding import force_text
from unidecode import unidecode
from pybb import defaults
if defaults.PYBB_USE_DJANGO_MAILER:
from mailer import send_html_mail, send_mail
else:
from django.core.mail import send_mail, get_connection
from django.core.mail.message import EmailMultiAlternatives
def send_html_mail(subject, text_msg, html_msg, sender, recipient,
fail_silently=False, auth_user=None, auth_password=None, connection=None):
"""Sends an email with HTML alternative."""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
msg = EmailMultiAlternatives(subject, text_msg, sender, recipient, connection=connection)
msg.attach_alternative(html_msg, "text/html")
msg.send()
def send_mass_html_mail(emails, *args, **kwargs):
"""
Sends emails with html alternative if email item has html content.
Email item is a tuple with an optional html message version :
(subject, text_msg, sender, recipient, [html_msg])
"""
for email in emails:
subject, text_msg, sender, recipient = email[0:4]
html_msg = email[4] if len(email) > 4 else ''
if html_msg:
send_html_mail(subject, text_msg, html_msg, sender, recipient, *args, **kwargs)
else:
send_mail(subject, text_msg, sender, recipient, *args, **kwargs)
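# A minimal usage sketch (assumed addresses): the fifth tuple element is
# optional and, when present and non-empty, triggers the HTML-alternative
# path above.
#
#     emails = (
#         ('Plain subject', 'text only', 'noreply@example.com',
#          ['user1@example.com']),
#         ('Rich subject', 'text fallback', 'noreply@example.com',
#          ['user2@example.com'], '<p>HTML body</p>'),
#     )
#     send_mass_html_mail(emails, fail_silently=True)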
def get_image_field_class():
try:
from PIL import Image
except ImportError:
from django.db.models import FileField
return FileField
try:
from sorl.thumbnail import ImageField
except ImportError:
from django.db.models import ImageField
return ImageField
def get_image_field_full_name():
try:
from PIL import Image
except ImportError:
return 'django.db.models.fields.files.FileField'
try:
from sorl.thumbnail import ImageField
name = 'sorl.thumbnail.fields.ImageField'
except ImportError:
from django.db.models import ImageField
name = 'django.db.models.fields.files.ImageField'
return name
def get_user_model():
from django.contrib.auth import get_user_model
return get_user_model()
def get_user_model_path():
return getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def get_username_field():
return get_user_model().USERNAME_FIELD
def get_atomic_func():
from django.db.transaction import atomic as atomic_func
return atomic_func
def get_paginator_class():
try:
from pure_pagination import Paginator
pure_pagination = True
except ImportError:
# the simplest emulation of django-pure-pagination behavior
from django.core.paginator import Paginator, Page
class PageRepr(int):
def querystring(self):
return 'page=%s' % self
Page.pages = lambda self: [PageRepr(i) for i in range(1, self.paginator.num_pages + 1)]
pure_pagination = False
return Paginator, pure_pagination
def is_installed(app_name):
from django.apps import apps
return apps.is_installed(app_name)
def get_related_model_class(parent_model, field_name):
return parent_model._meta.get_field(field_name).related_model
def slugify(text):
"""
Slugify function that supports unicode symbols
:param text: any unicode text
:return: slugified version of passed text
"""
from django.utils.text import slugify as django_slugify
return django_slugify(force_text(unidecode(text)))
def is_authenticated(user):
if django.VERSION > (1, 9):
return user.is_authenticated
return user.is_authenticated()
def is_anonymous(user):
if django.VERSION > (1, 9):
return user.is_anonymous
return user.is_anonymous()
| {
"content_hash": "7a739795ec3fd96e1dfc75ba0a36fec0",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 97,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.6696787148594378,
"repo_name": "hovel/pybbm",
"id": "5ffa8efb1c41ebea6ec1503edb0dbdd163c0d511",
"size": "3999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pybb/compat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "136463"
},
{
"name": "HTML",
"bytes": "64955"
},
{
"name": "JavaScript",
"bytes": "36945"
},
{
"name": "Python",
"bytes": "396916"
}
],
"symlink_target": ""
} |
"""
Registry's Client API
"""
import json
import os
from oslo.config import cfg
from glance.common import exception
import glance.openstack.common.log as logging
from glance.registry.client.v1 import client
LOG = logging.getLogger(__name__)
registry_client_ctx_opts = [
cfg.BoolOpt('send_identity_headers', default=False,
help=_("Whether to pass through headers containing user "
"and tenant information when making requests to "
"the registry. This allows the registry to use the "
"context middleware without the keystoneclients' "
"auth_token middleware, removing calls to the keystone "
"auth service. It is recommended that when using this "
"option, secure communication between glance api and "
"glance registry is ensured by means other than "
"auth_token middleware.")),
]
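# Illustrative glance-api.conf fragment for the option above (assumed
# deployment snippet, not part of this module); the registry then runs the
# context middleware without keystone's auth_token middleware:
#
#     [DEFAULT]
#     send_identity_headers = True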
CONF = cfg.CONF
CONF.register_opts(registry_client_ctx_opts)
_registry_client = 'glance.registry.client'
CONF.import_opt('registry_client_protocol', _registry_client)
CONF.import_opt('registry_client_key_file', _registry_client)
CONF.import_opt('registry_client_cert_file', _registry_client)
CONF.import_opt('registry_client_ca_file', _registry_client)
CONF.import_opt('registry_client_insecure', _registry_client)
CONF.import_opt('registry_client_timeout', _registry_client)
CONF.import_opt('use_user_token', _registry_client)
CONF.import_opt('admin_user', _registry_client)
CONF.import_opt('admin_password', _registry_client)
CONF.import_opt('admin_tenant_name', _registry_client)
CONF.import_opt('auth_url', _registry_client)
CONF.import_opt('auth_strategy', _registry_client)
CONF.import_opt('auth_region', _registry_client)
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
_CLIENT_CREDS = None
_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
# AES key used to encrypt 'location' metadata
_METADATA_ENCRYPTION_KEY = None
def configure_registry_client():
"""
Sets up a registry client for use in registry lookups
"""
global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY
try:
host, port = CONF.registry_host, CONF.registry_port
except cfg.ConfigFileValueError:
msg = _("Configuration option was not valid")
LOG.error(msg)
raise exception.BadRegistryConnectionConfiguration(reason=msg)
except IndexError:
msg = _("Could not find required configuration option")
LOG.error(msg)
raise exception.BadRegistryConnectionConfiguration(reason=msg)
_CLIENT_HOST = host
_CLIENT_PORT = port
_METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key
_CLIENT_KWARGS = {
'use_ssl': CONF.registry_client_protocol.lower() == 'https',
'key_file': CONF.registry_client_key_file,
'cert_file': CONF.registry_client_cert_file,
'ca_file': CONF.registry_client_ca_file,
'insecure': CONF.registry_client_insecure,
'timeout': CONF.registry_client_timeout,
}
if not CONF.use_user_token:
configure_registry_admin_creds()
def configure_registry_admin_creds():
global _CLIENT_CREDS
if CONF.auth_url or os.getenv('OS_AUTH_URL'):
strategy = 'keystone'
else:
strategy = CONF.auth_strategy
_CLIENT_CREDS = {
'user': CONF.admin_user,
'password': CONF.admin_password,
'username': CONF.admin_user,
'tenant': CONF.admin_tenant_name,
'auth_url': CONF.auth_url,
'strategy': strategy,
'region': CONF.auth_region,
}
def get_registry_client(cxt):
global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
global _METADATA_ENCRYPTION_KEY
kwargs = _CLIENT_KWARGS.copy()
if CONF.use_user_token:
kwargs['auth_tok'] = cxt.auth_tok
if _CLIENT_CREDS:
kwargs['creds'] = _CLIENT_CREDS
if CONF.send_identity_headers:
identity_headers = {
'X-User-Id': cxt.user,
'X-Tenant-Id': cxt.tenant,
'X-Roles': ','.join(cxt.roles),
'X-Identity-Status': 'Confirmed',
'X-Service-Catalog': json.dumps(cxt.service_catalog),
}
kwargs['identity_headers'] = identity_headers
return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT,
_METADATA_ENCRYPTION_KEY, **kwargs)
def get_images_list(context, **kwargs):
c = get_registry_client(context)
return c.get_images(**kwargs)
def get_images_detail(context, **kwargs):
c = get_registry_client(context)
return c.get_images_detailed(**kwargs)
def get_image_metadata(context, image_id):
c = get_registry_client(context)
return c.get_image(image_id)
def add_image_metadata(context, image_meta):
LOG.debug(_("Adding image metadata..."))
c = get_registry_client(context)
return c.add_image(image_meta)
def update_image_metadata(context, image_id, image_meta,
purge_props=False):
LOG.debug(_("Updating image metadata for image %s..."), image_id)
c = get_registry_client(context)
return c.update_image(image_id, image_meta, purge_props)
def delete_image_metadata(context, image_id):
LOG.debug(_("Deleting image metadata for image %s..."), image_id)
c = get_registry_client(context)
return c.delete_image(image_id)
def get_image_members(context, image_id):
c = get_registry_client(context)
return c.get_image_members(image_id)
def get_member_images(context, member_id):
c = get_registry_client(context)
return c.get_member_images(member_id)
def replace_members(context, image_id, member_data):
c = get_registry_client(context)
return c.replace_members(image_id, member_data)
def add_member(context, image_id, member_id, can_share=None):
c = get_registry_client(context)
return c.add_member(image_id, member_id, can_share=can_share)
def delete_member(context, image_id, member_id):
c = get_registry_client(context)
return c.delete_member(image_id, member_id)
| {
"content_hash": "24a0eaaf1c8d3de7b4a240dec3afae85",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 33.41847826086956,
"alnum_prop": 0.6615709871523825,
"repo_name": "cloudbau/glance",
"id": "b31d832fe8c678c610e4693c948526250276dbb8",
"size": "6835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/registry/client/v1/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2489476"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SitemapsConfig(AppConfig):
name = 'yepes.contrib.sitemaps'
verbose_name = _('Sitemaps')
| {
"content_hash": "0b86857dfaeb8b1d44ee7657e71740cd",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 23.6,
"alnum_prop": 0.75,
"repo_name": "samuelmaudo/yepes",
"id": "6e782b90756d5825e7aa3b8e6773e3d360addee1",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yepes/contrib/sitemaps/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1485"
},
{
"name": "CSS",
"bytes": "2805"
},
{
"name": "HTML",
"bytes": "18543"
},
{
"name": "JavaScript",
"bytes": "56039"
},
{
"name": "Python",
"bytes": "2415982"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import fnmatch
import imp
import socket
import sys
import traceback
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
try:
with open(PARENT_FILE, 'rb') as fp:
service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
class METRON${metron.short.version}ServiceAdvisor(service_advisor.ServiceAdvisor):
def getServiceComponentLayoutValidations(self, services, hosts):
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
metronParsersHost = self.getHosts(componentsList, "METRON_PARSERS")[0]
metronEnrichmentMaster = self.getHosts(componentsList, "METRON_ENRICHMENT_MASTER")[0]
metronProfilerHost = self.getHosts(componentsList, "METRON_PROFILER")[0]
metronPcapHost = self.getHosts(componentsList, "METRON_PCAP")[0]
metronIndexingHost = self.getHosts(componentsList, "METRON_INDEXING")[0]
metronRESTHost = self.getHosts(componentsList, "METRON_REST")[0]
metronManagementUIHost = self.getHosts(componentsList, "METRON_MANAGEMENT_UI")[0]
metronAlertsUIHost = self.getHosts(componentsList, "METRON_ALERTS_UI")[0]
hbaseClientHosts = self.getHosts(componentsList, "HBASE_CLIENT")
hdfsClientHosts = self.getHosts(componentsList, "HDFS_CLIENT")
zookeeperClientHosts = self.getHosts(componentsList, "ZOOKEEPER_CLIENT")
kafkaBrokers = self.getHosts(componentsList, "KAFKA_BROKER")
stormSupervisors = self.getHosts(componentsList, "SUPERVISOR")
items = []
# Metron Must Co-locate with KAFKA_BROKER and STORM_SUPERVISOR
if metronParsersHost not in kafkaBrokers:
message = "Metron must be colocated with an instance of KAFKA BROKER"
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'METRON_PARSERS', "host": metronParsersHost })
if metronParsersHost not in stormSupervisors:
message = "Metron must be colocated with an instance of STORM SUPERVISOR"
items.append({ "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'METRON_PARSERS', "host": metronParsersHost })
if metronRESTHost not in stormSupervisors:
message = "Metron REST must be colocated with an instance of STORM SUPERVISOR"
items.append({ "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'METRON_REST', "host": metronRESTHost })
if metronParsersHost != metronRESTHost:
message = "Metron REST must be co-located with Metron Parsers on {0}".format(metronParsersHost)
items.append({ "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'METRON_REST', "host": metronRESTHost })
if metronParsersHost != metronEnrichmentMaster:
message = "Metron Enrichment Master must be co-located with Metron Parsers on {0}".format(metronParsersHost)
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'METRON_ENRICHMENT_MASTER', "host": metronEnrichmentMaster })
if metronParsersHost != metronIndexingHost:
message = "Metron Indexing must be co-located with Metron Parsers on {0}".format(metronParsersHost)
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'METRON_INDEXING', "host": metronIndexingHost })
if metronParsersHost != metronPcapHost:
message = "Metron PCAP must be co-located with Metron Parsers on {0}".format(metronParsersHost)
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'METRON_PCAP', "host": metronPcapHost })
if metronParsersHost != metronProfilerHost:
message = "Metron Profiler must be co-located with Metron Parsers on {0}".format(metronParsersHost)
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'METRON_PROFILER', "host": metronProfilerHost })
# Enrichment Master also needs ZK Client, but this is already guaranteed by being colocated with Parsers Master
if metronParsersHost not in zookeeperClientHosts:
message = "Metron must be co-located with an instance of Zookeeper Client"
items.append({ "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'METRON_PARSERS', "host": metronParsersHost })
# Enrichment Master also needs HDFS clients, but this is already guaranteed by being colocated with Parsers Master
if metronParsersHost not in hdfsClientHosts:
message = "Metron must be co-located with an instance of HDFS Client"
items.append({ "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'METRON_PARSERS', "host": metronParsersHost })
if metronEnrichmentMaster not in hbaseClientHosts:
message = "Metron Enrichment Master must be co-located with an instance of HBase Client"
items.append({ "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'METRON_ENRICHMENT_MASTER', "host": metronEnrichmentMaster })
if metronManagementUIHost != metronAlertsUIHost:
message = "Metron Alerts UI must be co-located with Metron Management UI on {0}".format(metronManagementUIHost)
items.append({ "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'METRON_ALERTS_UI', "host": metronAlertsUIHost })
return items
def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
# validate recommended properties in storm-site
siteName = "storm-site"
method = self.validateSTORMSiteConfigurations
items = self.validateConfigurationsForSite(configurations, recommendedDefaults, services, hosts, siteName, method)
return items
def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
is_secured = self.isSecurityEnabled(services)
#Suggest Storm Rest URL
if "storm-site" in services["configurations"]:
stormUIServerHost = self.getComponentHostNames(services, "STORM", "STORM_UI_SERVER")[0]
stormUIServerPort = services["configurations"]["storm-site"]["properties"]["ui.port"]
stormUIProtocol = "http://"
if "ui.https.port" in services["configurations"]["storm-site"]["properties"]:
stormUIServerPort = services["configurations"]["storm-site"]["properties"]["ui.https.port"]
stormUIProtocol = "https://"
stormUIServerURL = stormUIProtocol + stormUIServerHost + ":" + stormUIServerPort
putMetronEnvProperty = self.putProperty(configurations, "metron-env", services)
putMetronEnvProperty("storm_rest_addr",stormUIServerURL)
storm_site = services["configurations"]["storm-site"]["properties"]
putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
for property, desired_value in self.getSTORMSiteDesiredValues(is_secured).iteritems():
if property not in storm_site:
putStormSiteProperty(property, desired_value)
elif property == "topology.classpath" and storm_site[property] != desired_value:
topologyClasspath = storm_site[property]
#check that desired values exist in topology.classpath. append them if they do not
for path in desired_value.split(':'):
if path not in topologyClasspath:
topologyClasspath += ":" + path
putStormSiteProperty(property,topologyClasspath)
#Suggest Zeppelin Server URL
if "zeppelin-config" in services["configurations"]:
zeppelinServerHost = self.getComponentHostNames(services, "ZEPPELIN", "ZEPPELIN_MASTER")[0]
zeppelinServerPort = services["configurations"]["zeppelin-config"]["properties"]["zeppelin.server.port"]
zeppelinServerUrl = zeppelinServerHost + ":" + zeppelinServerPort
putMetronEnvProperty = self.putProperty(configurations, "metron-env", services)
putMetronEnvProperty("zeppelin_server_url", zeppelinServerUrl)
#Suggest Zookeeper quorum
if "solr-cloud" in services["configurations"]:
zookeeperHost = self.getComponentHostNames(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")[0]
zookeeperClientPort = services["configurations"]["zoo.cfg"]["properties"]["clientPort"]
solrZkDir = services["configurations"]["solr-cloud"]["properties"]["solr_cloud_zk_directory"]
solrZookeeperUrl = zookeeperHost + ":" + zookeeperClientPort + solrZkDir
putMetronEnvProperty = self.putProperty(configurations, "metron-env", services)
putMetronEnvProperty("solr_zookeeper_url", solrZookeeperUrl)
def validateSTORMSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
# Determine if the cluster is secured
is_secured = self.isSecurityEnabled(services)
storm_site = properties
validationItems = []
for property, desired_value in self.getSTORMSiteDesiredValues(is_secured).iteritems():
if property not in storm_site :
message = "Metron requires this property to be set to the recommended value of " + desired_value
item = self.getErrorItem(message) if property == "topology.classpath" else self.getWarnItem(message)
validationItems.append({"config-name": property, "item": item})
elif storm_site[property] != desired_value:
topologyClasspath = storm_site[property]
for path in desired_value.split(':'):
if path not in topologyClasspath:
message = "Metron requires this property to contain " + desired_value
item = self.getErrorItem(message)
validationItems.append({"config-name": property, "item": item})
return self.toConfigurationValidationProblems(validationItems, "storm-site")
def getSTORMSiteDesiredValues(self, is_secured):
storm_site_desired_values = {
"topology.classpath" : "/etc/hbase/conf:/etc/hadoop/conf"
}
return storm_site_desired_values
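# Illustrative storm-site fragment that the recommendation/validation logic
# above converges on (the classpath entries come from getSTORMSiteDesiredValues;
# an existing topology.classpath is appended to rather than replaced):
#
#     "storm-site": {
#         "properties": {
#             "topology.classpath": "/etc/hbase/conf:/etc/hadoop/conf"
#         }
#     }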
| {
"content_hash": "ebbdc8384fa93b84dd91e491bd570a92",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 170,
"avg_line_length": 59.18181818181818,
"alnum_prop": 0.6811742618194231,
"repo_name": "justinleet/incubator-metron",
"id": "36cec7cc9df199ae59100b29bfe3f479ed5f1550",
"size": "11752",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/service_advisor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "11974"
},
{
"name": "Bro",
"bytes": "5403"
},
{
"name": "C",
"bytes": "45683"
},
{
"name": "C++",
"bytes": "15723"
},
{
"name": "CMake",
"bytes": "5605"
},
{
"name": "CSS",
"bytes": "703394"
},
{
"name": "HTML",
"bytes": "982173"
},
{
"name": "Java",
"bytes": "4033820"
},
{
"name": "JavaScript",
"bytes": "157826"
},
{
"name": "Makefile",
"bytes": "4867"
},
{
"name": "OpenEdge ABL",
"bytes": "70365"
},
{
"name": "Python",
"bytes": "197631"
},
{
"name": "Ruby",
"bytes": "22800"
},
{
"name": "Scala",
"bytes": "2700"
},
{
"name": "Shell",
"bytes": "135728"
},
{
"name": "TypeScript",
"bytes": "532518"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
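# Sketch of how these helpers fit together (illustrative; assumes an 80-byte
# header read from a blk*.dat file): the double SHA-256 digest is byte- and
# word-reversed so the hex string matches the big-endian block hashes listed
# in hashlist.txt.
#
#     with open("blk00000.dat", "rb") as f:
#         f.seek(8)              # skip the 4-byte magic and 4-byte length
#         blk_hdr = f.read(80)
#     print(calc_hash_str(blk_hdr))   # e.g. 000000000019d668...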
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
	def writeBlock(self, inhdr, blk_hdr, rawblock):
		blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
		if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
			self.outF.close()
			if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
			self.outF = None
			self.outFname = None
			self.outFn = self.outFn + 1
			self.outsz = 0
		(blkDate, blkTS) = get_blk_dt(blk_hdr)
		if self.timestampSplit and (blkDate > self.lastDate):
			print("New month " + blkDate.strftime("%Y-%m") + " @ " + calc_hash_str(blk_hdr))
			self.lastDate = blkDate
			if self.outF:
				self.outF.close()
				if self.setFileTime:
					os.utime(self.outFname, (int(time.time()), self.highTS))
				self.outF = None
				self.outFname = None
				self.outFn = self.outFn + 1
				self.outsz = 0
		if not self.outF:
			if self.fileOutput:
				self.outFname = self.settings['output_file']
			else:
				self.outFname = "%s/blk%05d.dat" % (self.settings['output'], self.outFn)
			print("Output file " + self.outFname)
			self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return "%s/blk%05d.dat" % (self.settings['input'], fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file" + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic:" + inMagic)
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
			if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" in blkmap:
print("not found")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
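# Example CONFIG-FILE contents (illustrative paths; every key shown here is
# read by the settings parser above and falls back to a default if omitted):
#
#     netmagic=f9beb4d9
#     input=/home/user/.bitcoin/blocks
#     hashlist=hashlist.txt
#     output_file=/mnt/linearized/bootstrap.dat
#     max_out_sz=1000000000
#     out_of_order_cache_sz=100000000
#     file_timestamp=0
#     split_timestamp=0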
| {
"content_hash": "0ac1ab7a55cb269acb0c54eb08e4bbd3",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 108,
"avg_line_length": 28.682758620689654,
"alnum_prop": 0.6710747775907671,
"repo_name": "my-first/octocoin",
"id": "939bba3b34a3ef9c3d80d1f34d7e6379598e6ebf",
"size": "8605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master-0.10",
"path": "contrib/linearize/linearize-data.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "343323"
},
{
"name": "C++",
"bytes": "3532257"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18088"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "144762"
},
{
"name": "Makefile",
"bytes": "83451"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "222283"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "40592"
}
],
"symlink_target": ""
} |
def parseFVexception(e):
import re
try:
r = re.findall("!!(.*?)!!",str(e))
except Exception,a:
return str(e)
if r:
return r[0]
else:
return str(e)
class startAggregateException(Exception):
    def __init__(self, slice, agg):
        self.slice = slice
        self.agg = agg
    def __str__(self):
        return repr("Could not start Aggregate Manager: %s in slice: %s." % (self.agg.name, self.slice.name))
| {
"content_hash": "ffdecdcc1101c19f368a45eec57990b2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 25.625,
"alnum_prop": 0.5853658536585366,
"repo_name": "avlach/univbris-ocf",
"id": "da282208871a0227bc7157d5ed8b5c3516a85379",
"size": "535",
"binary": false,
"copies": "2",
"ref": "refs/heads/ofelia.opticaldevelopment",
"path": "expedient/src/python/expedient/clearinghouse/slice/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127542"
},
{
"name": "JavaScript",
"bytes": "289680"
},
{
"name": "Perl",
"bytes": "4421"
},
{
"name": "Python",
"bytes": "3446617"
},
{
"name": "Racket",
"bytes": "32770"
},
{
"name": "Shell",
"bytes": "7609"
}
],
"symlink_target": ""
} |
""" upload.py - module for the upload command """
import config
import os
import shutil
import sys
import zipfile
def main(arguments):
""" main routine for upload command """
# pull out arguments
package = arguments.package
version = arguments.version
filepath = arguments.file
# read configuration
configuration = config.read()
if not config.verify(configuration):
sys.stdout.write('Please configure the repopath before use.\n')
return 1
target = os.path.join(configuration['repopath'], package, version)
# verify filepath exists and is valid
if not zipfile.is_zipfile(filepath):
sys.stderr.write('Package file must be a valid ZIP file.\n')
return 1
# attempt to create the target in the repo
if os.path.isdir(target):
        sys.stderr.write('Package version already exists in the repo.\n')
return 1
try:
os.makedirs(target)
    except OSError:
sys.stderr.write('Error creating repo target for the package.\n')
return 1
# attempt to copy package into repo
try:
shutil.copy(filepath, target)
except IOError:
sys.stderr.write('Error adding file to repository.\n')
return 1
print('Uploaded {0} version {1} to the repository.'
.format(package, version))
return 0
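if __name__ == '__main__':
    # Minimal illustration of driving this command directly; the real argparse
    # wiring lives in the CLI entry point, and the values below are assumptions.
    import argparse
    demo_args = argparse.Namespace(package='demo', version='1.0.0',
                                   file='dist/demo-1.0.0.zip')
    sys.exit(main(demo_args))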
| {
"content_hash": "dc36df35f82c8ac9ac461197521a95c4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 73,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.65625,
"repo_name": "rmkraus/zippy",
"id": "61d785e2c039731ae848498436fc97a00069212b",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zippy/upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "204"
},
{
"name": "Python",
"bytes": "15567"
}
],
"symlink_target": ""
} |
""" Sahana Eden Patient Model
@copyright: 2009-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3PatientModel",)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3PatientModel(S3Model):
"""
"""
names = ("patient_patient",
"patient_relative",
"patient_home",
)
def model(self):
T = current.T
db = current.db
gis = current.gis
person_id = self.pr_person_id
messages = current.messages
add_components = self.add_components
crud_strings = current.response.s3.crud_strings
# ---------------------------------------------------------------------
# Patients
tablename = "patient_patient"
self.define_table(tablename,
person_id(comment = None,
label = T("Patient"),
requires = IS_ADD_PERSON_WIDGET2(),
widget = S3AddPersonWidget2(),
),
Field("country",
label = T("Current Location Country"),
represent = lambda code: \
gis.get_country(code, key_type="code") or \
messages.UNKNOWN_OPT,
requires = IS_EMPTY_OR(IS_IN_SET_LAZY(
lambda: gis.get_countries(key_type="code"),
zero = messages.SELECT_LOCATION)),
),
self.hms_hospital_id(
empty = False,
label = T("Current Location Treating Hospital")
),
Field("phone", requires=s3_phone_requires,
label = T("Current Location Phone Number"),
),
Field("injuries", "text",
label = T("Injuries"),
widget = s3_comments_widget,
),
s3_date("treatment_date",
label = T("Date of Treatment"),
),
s3_date("return_date",
label = T("Expected Return Home"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add New Patient"),
title_display = T("Patient Details"),
title_list = T("Patients"),
title_update = T("Edit Patient"),
label_list_button = T("List Patients"),
label_delete_button = T("Delete Patient"),
msg_record_created = T("Patient added"),
msg_record_modified = T("Patient updated"),
msg_record_deleted = T("Patient deleted"),
msg_list_empty = T("No Patients currently registered"))
patient_represent = patient_PatientRepresent(lookup = "patient_patient")
# Reusable Field for Component Link
patient_id = S3ReusableField("patient_id", "reference %s" % tablename,
ondelete = "RESTRICT",
represent = patient_represent,
requires = IS_ONE_OF(db,
"patient_patient.id",
patient_represent),
)
# Search method
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$local_name",
],
label = T("Search"),
comment=T("To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients."),
),
S3OptionsFilter("country",
label = messages.COUNTRY,
cols = 2,
hidden = True,
),
S3OptionsFilter("hospital_id",
label = T("Hospital"),
cols = 2,
hidden = True,
),
]
# Configuration
self.configure(tablename,
filter_widgets = filter_widgets,
)
# Components
add_components(tablename,
# Relatives
patient_relative = {"joinby": "patient_id",
"multiple": False,
},
# Homes
patient_home = {"joinby": "patient_id",
"multiple": False,
},
)
# ---------------------------------------------------------------------
# Relatives
tablename = "patient_relative"
self.define_table(tablename,
patient_id(readable = False,
writable = False),
person_id(comment = None,
label = T("Accompanying Relative"),
requires = IS_ADD_PERSON_WIDGET2(),
widget = S3AddPersonWidget2(),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_RELATIVE = T("New Relative")
crud_strings[tablename] = Storage(
label_create = ADD_RELATIVE,
title_display = T("Relative Details"),
title_list = T("Relatives"),
title_update = T("Edit Relative"),
label_list_button = T("List Relatives"),
label_delete_button = T("Delete Relative"),
msg_record_created = T("Relative added"),
msg_record_modified = T("Relative updated"),
msg_record_deleted = T("Relative deleted"),
msg_list_empty = T("No Relatives currently registered"))
# ---------------------------------------------------------------------
# Homes
#
# @ToDo: Default the Home Phone Number from the Person, if available
# @ToDo: Onvalidation to set the Relative's Contact
tablename = "patient_home"
self.define_table(tablename,
patient_id(readable = False,
writable = False),
person_id(comment = None,
label = T("Home Relative"),
requires = IS_ADD_PERSON_WIDGET2(),
widget = S3AddPersonWidget2(),
),
#person_id(label = T("Home Relative")),
self.gis_location_id(
label = T("Home City"),
requires = IS_LOCATION(level="L2"),
widget = S3LocationAutocompleteWidget(level="L2"),
),
Field("phone",
label = T("Home Phone Number"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add New Home"),
title_display = T("Home Details"),
title_list = T("Homes"),
title_update = T("Edit Home"),
label_list_button = T("List Homes"),
label_delete_button = T("Delete Home"),
msg_record_created = T("Home added"),
msg_record_modified = T("Home updated"),
msg_record_deleted = T("Home deleted"),
msg_list_empty = T("No Homes currently registered"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class patient_PatientRepresent(S3Represent):
"""
Representation of Patient names by their full name
"""
    def lookup_rows(self, key, values, fields=None):
"""
Custom lookup method for Patient names
@param key: Key for patient table
@param values: Patient IDs
"""
table = self.table
ptable = current.s3db.pr_person
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = (key.belongs(values))
left = ptable.on(table.person_id == ptable.id)
db = current.db
        rows = db(query).select(table.id,
                                ptable.first_name,
                                ptable.middle_name,
                                ptable.last_name,
                                limitby = (0, count),
                                left = left)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row for a particular patient
@param row: patient_patient Row
"""
try:
return s3_fullname(row)
        except Exception:
return current.messages.UNKNOWN_OPT
# END =========================================================================
| {
"content_hash": "c846a86ef265625743dbdd3cbf0c5fc0",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 208,
"avg_line_length": 40.86572438162544,
"alnum_prop": 0.42343277129269347,
"repo_name": "flavour/ifrc_qa",
"id": "6e4368865f3941c0c5f26e63db46958fd326f482",
"size": "11590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/s3db/patient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3347085"
},
{
"name": "HTML",
"bytes": "1367849"
},
{
"name": "JavaScript",
"bytes": "20092291"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31198396"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3260831"
}
],
"symlink_target": ""
} |
"""
Author: Ang Ming Liang
Please run one of the following commands before running the script
wget -q https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py
or curl https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py > data.py
Then, make sure to get your kaggle.json from kaggle.com and run
mkdir /root/.kaggle
cp kaggle.json /root/.kaggle/kaggle.json
chmod 600 /root/.kaggle/kaggle.json
rm kaggle.json
to install kaggle.json into the folder the Kaggle API expects
"""
import superimport
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import truncnorm
from torchvision.utils import make_grid
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from pytorch_lightning import LightningModule, Trainer
from einops import rearrange
from tqdm import tqdm
from data import CelebADataset, CelebADataModule
from torch import Tensor
from argparse import ArgumentParser
from typing import Any, Optional
import torch.backends.cudnn as cudnn
from pytorch_lightning.callbacks import ModelCheckpoint, StochasticWeightAveraging
class DCGANGenerator(nn.Module):
def __init__(self, latent_dim: int, feature_maps: int, image_channels: int) -> None:
"""
Args:
latent_dim: Dimension of the latent space
feature_maps: Number of feature maps to use
image_channels: Number of channels of the images from the dataset
"""
super().__init__()
self.gen = nn.Sequential(
self._make_gen_block(latent_dim, feature_maps * 8, kernel_size=4, stride=1, padding=0),
self._make_gen_block(feature_maps * 8, feature_maps * 4),
self._make_gen_block(feature_maps * 4, feature_maps * 2),
self._make_gen_block(feature_maps * 2, feature_maps),
self._make_gen_block(feature_maps, image_channels, last_block=True),
)
@staticmethod
def _make_gen_block(
in_channels: int,
out_channels: int,
kernel_size: int = 4,
stride: int = 2,
padding: int = 1,
bias: bool = False,
last_block: bool = False,
use_relu: bool = False
) -> nn.Sequential:
if not last_block:
gen_block = nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.BatchNorm2d(out_channels),
                nn.ReLU() if use_relu else nn.Mish(),
)
else:
gen_block = nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.Sigmoid(),
)
return gen_block
    def forward(self, noise: Tensor) -> Tensor:
        # Maps latent noise of shape (N, latent_dim, 1, 1) to images of shape (N, image_channels, 64, 64).
        return self.gen(noise)
class DCGANDiscriminator(nn.Module):
def __init__(self, feature_maps: int, image_channels: int) -> None:
"""
Args:
feature_maps: Number of feature maps to use
image_channels: Number of channels of the images from the dataset
"""
super().__init__()
self.disc = nn.Sequential(
self._make_disc_block(image_channels, feature_maps, batch_norm=False),
self._make_disc_block(feature_maps, feature_maps * 2),
self._make_disc_block(feature_maps * 2, feature_maps * 4),
self._make_disc_block(feature_maps * 4, feature_maps * 8),
self._make_disc_block(feature_maps * 8, 1, kernel_size=4, stride=1, padding=0, last_block=True),
)
@staticmethod
def _make_disc_block(
in_channels: int,
out_channels: int,
kernel_size: int = 4,
stride: int = 2,
padding: int = 1,
bias: bool = False,
batch_norm: bool = True,
last_block: bool = False,
) -> nn.Sequential:
if not last_block:
disc_block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.BatchNorm2d(out_channels) if batch_norm else nn.Identity(),
nn.LeakyReLU(0.2, inplace=True),
)
else:
disc_block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.Sigmoid(),
)
return disc_block
def forward(self, x: Tensor) -> Tensor:
return self.disc(x).view(-1, 1).squeeze(1)
class DCGAN(LightningModule):
"""
DCGAN implementation.
Example::
from pl_bolts.models.gans import DCGAN
m = DCGAN()
Trainer(gpus=2).fit(m)
Example CLI::
# mnist
python dcgan_module.py --gpus 1
# cifar10
python dcgan_module.py --gpus 1 --dataset cifar10 --image_channels 3
"""
def __init__(
self,
beta1: float = 0.5,
feature_maps_gen: int = 64,
feature_maps_disc: int = 64,
image_channels: int = 3,
latent_dim: int = 100,
learning_rate: float = 0.0002,
topk: Optional[int] = 144,
**kwargs: Any,
) -> None:
"""
Args:
beta1: Beta1 value for Adam optimizer
feature_maps_gen: Number of feature maps to use for the generator
feature_maps_disc: Number of feature maps to use for the discriminator
image_channels: Number of channels of the images from the dataset
latent_dim: Dimension of the latent space
            learning_rate: Learning rate
            topk: Number of highest-scoring fake samples kept for the generator loss (top-k training)
        """
super().__init__()
self.save_hyperparameters()
self.generator = self._get_generator()
self.discriminator = self._get_discriminator()
self.criterion = nn.BCELoss()
        self.noise_factor = 0
        self.topk = topk
def _get_generator(self) -> nn.Module:
generator = DCGANGenerator(self.hparams.latent_dim, self.hparams.feature_maps_gen, self.hparams.image_channels)
generator.apply(self._weights_init)
return generator
def _get_discriminator(self) -> nn.Module:
discriminator = DCGANDiscriminator(self.hparams.feature_maps_disc, self.hparams.image_channels)
discriminator.apply(self._weights_init)
return discriminator
@staticmethod
def _weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
torch.nn.init.normal_(m.weight, 1.0, 0.02)
torch.nn.init.zeros_(m.bias)
def configure_optimizers(self):
lr = self.hparams.learning_rate
betas = (self.hparams.beta1, 0.999)
opt_disc = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=betas)
opt_gen = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=betas)
return [opt_disc, opt_gen], []
def forward(self, noise: Tensor) -> Tensor:
"""
Generates an image given input noise
Example::
noise = torch.rand(batch_size, latent_dim)
gan = GAN.load_from_checkpoint(PATH)
img = gan(noise)
"""
noise = noise.view(*noise.shape, 1, 1)
return self.generator(noise)
def training_step(self, batch, batch_idx, optimizer_idx):
real, _ = batch
# Train discriminator
result = None
if optimizer_idx == 0:
result = self._disc_step(real)
# Train generator
if optimizer_idx == 1:
result = self._gen_step(real)
return result
def _disc_step(self, real: Tensor) -> Tensor:
disc_loss = self._get_disc_loss(real)
self.log("loss/disc", disc_loss, on_epoch=True)
return disc_loss
def _gen_step(self, real: Tensor) -> Tensor:
gen_loss = self._get_gen_loss(real)
self.log("loss/gen", gen_loss, on_epoch=True)
return gen_loss
def _get_disc_loss(self, real: Tensor, smooth=0) -> Tensor:
# Train with real
real = real + self.noise_factor*torch.rand_like(real)
real_pred = self.discriminator(real)
real_gt = smooth*torch.rand_like(real_pred)+(1-smooth)
real_loss = self.criterion(real_pred, real_gt)
# Train with fake
fake_pred = self._get_fake_pred(real)
fake_gt = smooth*torch.rand_like(fake_pred)
fake_loss = self.criterion(fake_pred, fake_gt)
disc_loss = real_loss + fake_loss
return disc_loss
    def _get_gen_loss(self, real: Tensor) -> Tensor:
        # Train with fake: top-k training keeps only the k fake samples the
        # discriminator scores as most realistic when computing the generator loss.
        fake_pred = self._get_fake_pred(real)
        topk_predictions = torch.topk(fake_pred, self.topk)[0]
        fake_gt = torch.ones_like(topk_predictions)
        gen_loss = self.criterion(topk_predictions, fake_gt)
        return gen_loss
def _get_fake_pred(self, real: Tensor) -> Tensor:
batch_size = len(real)
noise = self._get_noise(batch_size, self.hparams.latent_dim)
fake = self(noise)
fake = fake + self.noise_factor*torch.rand_like(real)
fake_pred = self.discriminator(fake)
return fake_pred
def _get_noise(self, n_samples: int, latent_dim: int) -> Tensor:
return torch.randn(n_samples, latent_dim, device=self.device)
@staticmethod
def add_model_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--beta1", default=0.5, type=float)
parser.add_argument("--feature_maps_gen", default=64, type=int)
parser.add_argument("--feature_maps_disc", default=64, type=int)
parser.add_argument("--latent_dim", default=100, type=int)
parser.add_argument("--learning_rate", default=0.0002, type=float)
parser.add_argument("--topk", default=10, type=float)
return parser
def plt_image_generated(m, size, threshold=1, fname="generated.png"):
plt.figure(figsize=(size,size))
values = truncnorm.rvs(-threshold, threshold, size=(64, 100))
z = torch.from_numpy(values).float()
imgs = rearrange(make_grid(m(z)), 'c h w -> h w c').detach().numpy()
plt.imshow(imgs)
plt.savefig(fname)
def test_scaling(dm):
    # Make sure the pixel values are scaled between 0 and 1
for batch in tqdm(dm.train_dataloader()):
x, y = batch
assert 1 >= x.max()
assert 0 <= x.min()
assert torch.any(x < 1)
assert torch.any(x > 0)
def ewa(averaged_model_parameter: torch.Tensor,
        model_parameter: torch.Tensor,
        num_averaged: torch.LongTensor,
        smooth: float = 0.9) -> torch.FloatTensor:
    """
    Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L95-L97
    """
    alpha = smooth / (num_averaged + 1)
    return averaged_model_parameter * (1 - alpha) + model_parameter * alpha
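# Illustrative sketch, not part of the original script: `ewa` matches the
# `avg_fn(averaged_param, current_param, num_averaged)` callback expected by
# torch.optim.swa_utils.AveragedModel, so it can maintain an exponentially
# weighted copy of the generator. The helper and variable names below are
# assumptions made for this example only.
def _ewa_usage_example(generator: nn.Module) -> nn.Module:
    """Return an AveragedModel that smooths `generator` weights with `ewa`."""
    from torch.optim.swa_utils import AveragedModel
    averaged_gen = AveragedModel(generator, avg_fn=ewa)
    # After each optimizer step the caller would run:
    #     averaged_gen.update_parameters(generator)
    return averaged_gen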
if __name__ == "__main__":
parser = ArgumentParser(description='Hyperparameters for our experiments')
parser.add_argument('--seed', type=int, default=1, help="random seed")
parser.add_argument('--image-size', type=int, default=64, help="Image size")
parser.add_argument('--crop-size', type=int, default=128, help="Crop size")
parser.add_argument('--bs', type=int, default=144, help="batch size")
    parser.add_argument('--data-path', type=str, default="kaggle", help="path to the CelebA data directory")
parser.add_argument('--gpus', type=int, default=1, help="gpu use")
parser.add_argument('--epochs', type=int, default=50, help="Num of epochs")
parser = DCGAN.add_model_specific_args(parser)
# Hyperparameters
hparams = parser.parse_args()
SEED = hparams.seed
torch.manual_seed(SEED)
np.random.seed(SEED)
cudnn.deterministic = True
cudnn.benchmark = False
IMAGE_SIZE = hparams.image_size
BATCH_SIZE = hparams.bs
CROP = hparams.crop_size
DATA_PATH = hparams.data_path
trans = []
trans.append(transforms.RandomHorizontalFlip())
if CROP > 0:
trans.append(transforms.CenterCrop(CROP))
trans.append(transforms.Resize(IMAGE_SIZE))
trans.append(transforms.ToTensor())
transform = transforms.Compose(trans)
ds = CelebADataset(root='kaggle', split='test', target_type='attr', download=True)
dm = CelebADataModule(data_dir=DATA_PATH,
target_type='attr',
train_transform=transform,
val_transform=transform,
download=True,
batch_size=BATCH_SIZE,
num_workers=1)
dm.prepare_data() # force download now
dm.setup() # force make data loaders now
m = DCGAN()
checkpoint_callback = ModelCheckpoint(monitor='loss/gen_epoch',
dirpath='./checkpoints',
filename='sample-celeba-{epoch:02d}-{gan_loss:.2f}',
save_top_k=3)
runner = Trainer(
logger=None,
gpus = hparams.gpus,
max_epochs = hparams.epochs,
callbacks=[checkpoint_callback])
runner.fit(m, datamodule=dm)
torch.save(m.state_dict(), "dcgan.ckpt")
plt_image_generated(m, 10)
| {
"content_hash": "500178905b424cc876d076607d4b75d2",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 119,
"avg_line_length": 36.39837398373984,
"alnum_prop": 0.6047948775221502,
"repo_name": "probml/pyprobml",
"id": "deadf65b98d99c11e834c74b7e59c24564fb3bda",
"size": "13455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deprecated/scripts/dcgan_celeba_lightning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "410080351"
},
{
"name": "MATLAB",
"bytes": "41510"
},
{
"name": "Python",
"bytes": "1842224"
},
{
"name": "R",
"bytes": "576"
},
{
"name": "Shell",
"bytes": "45"
},
{
"name": "TeX",
"bytes": "302617"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import enumfields.fields
import shop.models
import shop.enums
import djmoney.models.fields
from decimal import Decimal
import uuid
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('date_created', models.DateTimeField(null=True, verbose_name='date created', auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True, null=True, verbose_name='date modified')),
('id', models.UUIDField(serialize=False, primary_key=True, default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=64)),
('code', models.CharField(max_length=13)),
('description', models.TextField(null=True, blank=True)),
('image', models.ImageField(null=True, blank=True, upload_to=shop.models.generate_product_filename)),
('price_currency', djmoney.models.fields.CurrencyField(max_length=3, choices=[('SEK', 'SEK')], default='SEK', editable=False)),
('price', djmoney.models.fields.MoneyField(currency_choices=(('SEK', 'SEK'),), decimal_places=2, default_currency='SEK', default=Decimal('0.0'), max_digits=10)),
('active', models.BooleanField(default=True)),
('qty', models.IntegerField(default=0)),
],
options={
'verbose_name_plural': 'products',
'verbose_name': 'product',
},
),
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.UUIDField(serialize=False, primary_key=True, default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=64)),
('image', models.ImageField(null=True, blank=True, upload_to='')),
],
options={
'verbose_name_plural': 'categories',
'verbose_name': 'category',
},
),
migrations.CreateModel(
name='ProductTransaction',
fields=[
('date_created', models.DateTimeField(null=True, verbose_name='date created', auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True, null=True, verbose_name='date modified')),
('id', models.UUIDField(serialize=False, primary_key=True, default=uuid.uuid4, editable=False)),
('qty', models.IntegerField()),
('trx_type', enumfields.fields.EnumIntegerField(enum=shop.enums.TrxType)),
('product', models.ForeignKey(related_name='transactions', to='shop.Product')),
],
options={
'verbose_name_plural': 'transactions',
'verbose_name': 'transaction',
},
),
migrations.AddField(
model_name='product',
name='category',
field=models.ForeignKey(blank=True, to='shop.ProductCategory', null=True),
),
]
| {
"content_hash": "81cfd0f0f458a269d603f0b88421e856",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 177,
"avg_line_length": 45.22857142857143,
"alnum_prop": 0.5726468730259002,
"repo_name": "uppsaladatavetare/foobar-api",
"id": "fc387a5d11c60175df09186f1b54ac461d7c2f80",
"size": "3190",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/shop/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3317"
},
{
"name": "HTML",
"bytes": "10880"
},
{
"name": "JavaScript",
"bytes": "10604"
},
{
"name": "Makefile",
"bytes": "796"
},
{
"name": "Python",
"bytes": "318730"
}
],
"symlink_target": ""
} |
"""Remove deprecated tables
Revision ID: 235b7b9989be
Revises: 2b1ba26f2123
Create Date: 2014-08-04 20:34:04.786894
"""
# revision identifiers, used by Alembic.
revision = '235b7b9989be'
down_revision = '2b1ba26f2123'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Drop now-unneeded tables
op.drop_table('workflow_objects')
op.drop_table('workflow_tasks')
op.drop_table('tasks')
def downgrade():
pass
| {
"content_hash": "9173c909d67aed75b323e0df1615a800",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 40,
"avg_line_length": 17.84,
"alnum_prop": 0.7197309417040358,
"repo_name": "uskudnik/ggrc-core",
"id": "388cc831da3505e152e1b93ef1c4f50bfd83a9d1",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/ggrc_workflows/migrations/versions/20140804203404_235b7b9989be_remove_deprecated_tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "232153"
},
{
"name": "Cucumber",
"bytes": "140526"
},
{
"name": "HTML",
"bytes": "6048248"
},
{
"name": "JavaScript",
"bytes": "1878527"
},
{
"name": "Makefile",
"bytes": "5524"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11509"
}
],
"symlink_target": ""
} |
from django.views.generic import DetailView
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.mixins import EmailCreateView
from eventex.subscriptions.models import Subscription
new = EmailCreateView.as_view(model=Subscription, form_class=SubscriptionForm,
email_subject='Confirmação de inscrição')
detail = DetailView.as_view(model=Subscription)
| {
"content_hash": "28d1f0ca361143fcada0fe331eff06b7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 78,
"avg_line_length": 41.4,
"alnum_prop": 0.7898550724637681,
"repo_name": "Golker/wttd",
"id": "808f92513e91da1d044a1481093bbd80a7ae7411",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventex/subscriptions/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37236"
},
{
"name": "JavaScript",
"bytes": "8834"
},
{
"name": "Python",
"bytes": "103988"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import NoReverseMatch
from django.http import HttpResponse
from tastypie import http
from tastypie.resources import Resource
from tastypie.exceptions import InvalidSortError, ImmediateHttpResponse
class dict_object(object):
def __init__(self, dict):
self.dict = dict
def __getattr__(self, item):
return self.dict[item]
def __repr__(self):
return 'dict_object(%r)' % self.dict
def build_content_type(format, encoding='utf-8'):
if 'charset' in format:
return format
return "%s; charset=%s" % (format, encoding)
class JsonResourceMixin(object):
"""
This can be extended to default to json formatting.
"""
# This exists in addition to the mixin since the order of the class
# definitions actually matters
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
# overridden so we can specify a utf-8 charset
# http://stackoverflow.com/questions/17280513/tastypie-json-header-to-use-utf-8
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def determine_format(self, request):
format = super(JsonResourceMixin, self).determine_format(request)
# Tastypie does _not_ support text/html but also does not raise the appropriate UnsupportedFormat exception
# for all other unsupported formats, Tastypie has correct behavior, so we only hack around this one.
if format == 'text/html':
format = 'application/json'
return format
class CorsResourceMixin(object):
"""
Mixin implementing CORS
"""
def create_response(self, *args, **kwargs):
response = super(CorsResourceMixin, self).create_response(*args, **kwargs)
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
return response
def method_check(self, request, allowed=None):
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join(map(lambda x: x.upper() if x else '', allowed))
if request_method == 'options':
response = HttpResponse(allows)
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if request_method not in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
class HqBaseResource(CorsResourceMixin, JsonResourceMixin, Resource):
"""
Convenience class to allow easy adjustment of API resource base classes.
"""
pass
class SimpleSortableResourceMixin(object):
'''
In toastdriven/tastypie the apply_sorting method is moderately Django-specific so it is not
implemented in the Resource class but only in the ModelResource class. This is a
    version that is simplified to only support direct field ordering (none of Django's fancy `__` field access).
    This can only be mixed into a resource that passes `obj_list` with type
order_by :: (*str) -> self.__class__
and should also have a meta field `ordering` that specifies the allowed fields
_meta :: [str]
'''
def apply_sorting(self, obj_list, options=None):
if options is None:
options = {}
if 'order_by' not in options:
return obj_list
order_by = options.getlist('order_by')
order_by_args = []
for field in order_by:
if field.startswith('-'):
order = '-'
field_name = field[1:]
else:
order = ''
field_name = field
# Map the field back to the actual attribute
            if field_name not in self.fields:
                raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
            if field_name not in self._meta.ordering:
                raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
if self.fields[field_name].attribute is None:
raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
order_by_args.append("%s%s" % (order, self.fields[field_name].attribute))
return obj_list.order_by(*order_by_args)
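# Illustrative sketch, not part of the original module: a resource mixing this in
# whitelists its sortable fields on Meta and then honours requests such as
# ``?order_by=-date_created``. The resource and field names below are assumptions
# made for this example only.
#
#     class CaseResource(SimpleSortableResourceMixin, HqBaseResource):
#         class Meta(object):
#             ordering = ['date_created', 'name']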
class DomainSpecificResourceMixin(object):
def get_list(self, request, **kwargs):
"""
Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.14/tastypie/resources.py#L1262
(BSD licensed) and modified to pass the kwargs to `get_resource_list_uri`
(tracked by https://github.com/toastdriven/django-tastypie/pull/815)
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
base_bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_list_uri(request, kwargs), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = []
for obj in to_be_serialized[self._meta.collection_name]:
bundle = self.build_bundle(obj=obj, request=request)
bundles.append(self.full_dehydrate(bundle))
to_be_serialized[self._meta.collection_name] = bundles
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def get_resource_list_uri(self, request=None, **kwargs):
"""
Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.11/tastypie/resources.py#L601
(BSD licensed) and modified to use the kwargs.
(v0.9.14 combines get_resource_list_uri and get_resource_uri; this re-separates them to keep things simpler)
"""
kwargs = dict(kwargs)
kwargs['resource_name'] = self._meta.resource_name
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
try:
return self._build_reverse_url("api_dispatch_list", kwargs=kwargs)
except NoReverseMatch:
return None
| {
"content_hash": "5e4765b173e607906ca66e8047fb185b",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 233,
"avg_line_length": 38.362637362637365,
"alnum_prop": 0.6499570323689488,
"repo_name": "puttarajubr/commcare-hq",
"id": "1a23eb14771864b7b42371b6a4ca38ebebce987e",
"size": "6982",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/apps/api/resources/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
from toee import *
import char_class_utils
###################################################
def GetConditionName():
return "Horizon Walker"
def GetCategory():
return "Core 3.5 Ed Prestige Classes"
def GetClassDefinitionFlags():
return CDF_CoreClass
def GetClassHelpTopic():
return "TAG_HORIZON_WALKERS"
classEnum = stat_level_horizon_walker
###################################################
class_feats = {
}
class_skills = (skill_alchemy, skill_balance, skill_climb, skill_diplomacy, skill_handle_animal, skill_hide, skill_knowledge_nature, skill_listen, skill_move_silently, skill_profession, skill_ride, skill_spot, skill_wilderness_lore)
def IsEnabled():
return 0
def GetHitDieType():
return 8
def GetSkillPtsPerLevel():
return 4
def GetBabProgression():
return base_attack_bonus_martial
def IsFortSaveFavored():
return 1
def IsRefSaveFavored():
return 0
def IsWillSaveFavored():
return 0
def GetSpellListType():
return spell_list_type_none
def IsClassSkill(skillEnum):
return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
return class_feats
def IsAlignmentCompatible( alignment):
return 1
def ObjMeetsPrereqs( obj ):
return 0 # WIP
if (obj.divine_spell_level_can_cast() < 7):
return 0
return 1 | {
"content_hash": "4abf421d5ad83850fad9830d3ba65077",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 232,
"avg_line_length": 19.281690140845072,
"alnum_prop": 0.7027027027027027,
"repo_name": "GrognardsFromHell/TemplePlus",
"id": "607b836bc9c15aefd66ecf585ec5c0c80c0f6016",
"size": "1369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpdatasrc/tpgamefiles/rules/char_class/class028_horizon_walker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "629718"
},
{
"name": "C#",
"bytes": "167885"
},
{
"name": "C++",
"bytes": "10018792"
},
{
"name": "CMake",
"bytes": "91980"
},
{
"name": "CSS",
"bytes": "1292"
},
{
"name": "HLSL",
"bytes": "18884"
},
{
"name": "HTML",
"bytes": "433942"
},
{
"name": "PowerShell",
"bytes": "5374"
},
{
"name": "Python",
"bytes": "2850350"
}
],
"symlink_target": ""
} |
# coding: utf-8
"""TensorBoard functions that can be used to log training status during an epoch."""
from __future__ import absolute_import
import logging
import mxnet as mx
import numpy as np
class LogMetricsCallback(object):
def __init__(self, logging_dir, prefix=None):
self.prefix = prefix
self.itr = 0
try:
from tensorboard import SummaryWriter
self.summary_writer = SummaryWriter(logging_dir)
except ImportError:
logging.error('You can install tensorboard via `pip install tensorboard`.')
def __call__(self, param):
"""Callback to log training speed and metrics in TensorBoard."""
if param.eval_metric is None:
return
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
if self.prefix is not None:
name = '%s-%s' % (self.prefix, name)
self.summary_writer.add_scalar(name, value, self.itr)
self.itr += 1
class LossMetric(mx.metric.EvalMetric):
"""
Calculate precision and recall for bounding box detection
Parameters
----------
threshold : float
"""
def __init__(self, conf_threshold=0.85, eps=1e-5, allow_extra_outputs=True):
self.eps = eps
super(LossMetric, self).__init__('LossMetric', allow_extra_outputs=allow_extra_outputs)
self.conf_threshold = conf_threshold
    def reset(self):
        """Clear the internal statistics to initial state."""
        self.num_inst = self.eps
        self.sum_tp = self.eps
        self.sum_tn = self.eps
        self.sum_fn = self.eps
        self.sum_fp = self.eps
        self.sum_conf = self.eps
        self.sum_conf_box = self.eps
        self.sum_conf_nbox = self.eps
        self.sum_loss = self.eps
        self.sum_x = self.eps
        self.sum_y = self.eps
        self.sum_h = self.eps
        self.sum_w = self.eps
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
self.num_inst += 1
self.sum_loss = np.mean(preds[0].asnumpy())
        label = labels[0].asnumpy().reshape((-1, 49, 5))
        pred = ((preds[0] + 1) / 2).asnumpy().reshape((-1, 49, 5))
        # Channel 0 is the box confidence; channels 1-4 are the x, y, w, h terms.
        c_label = label[:, :, 0]
        c_pred = pred[:, :, 0]
boxes_pred = c_pred > self.conf_threshold
self.sum_tp = np.sum(c_label * boxes_pred)
self.sum_tn = np.sum((1 - c_label) * (1 - boxes_pred))
self.sum_fn = np.sum(c_label * (1 - boxes_pred))
self.sum_fp = np.sum(boxes_pred * (1 - c_label))
num_boxes = np.sum(c_label)
self.sum_conf = np.sum(np.abs(c_pred - c_label)) / \
(49 * label.shape[0])
self.sum_conf_box = np.sum(np.abs(c_pred - c_label) * c_label) / \
(np.sum(c_label))
self.sum_conf_nbox = np.sum(np.abs(c_pred - c_label) * (1 - c_label)) / \
(np.sum(1 - c_label))
self.sum_x = np.sum((np.abs(pred[:, :, 1] - 0.5 - label[:, :, 1])) * c_label) * 16 / num_boxes
self.sum_y = np.sum((np.abs(pred[:, :, 2] - 0.5 - label[:, :, 2])) * c_label) * 16 / num_boxes
self.sum_w = np.sum(np.abs(pred[:, :, 3] - label[:, :, 3]) * c_label) \
* 224 / num_boxes
self.sum_h = np.sum(np.abs(pred[:, :, 4] - label[:, :, 4]) * c_label) \
* 224 / num_boxes
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
names = ['c_accuracy', 'c_precision', 'c_recall', 'c_diff', 'c_box_diff', 'c_nbox_diff', 'x_diff', 'y_diff',
'w_diff', 'h_diff', 'loss']
values = []
values.append((self.sum_tp + self.sum_tn) / (
self.sum_tp + self.sum_tn + self.sum_fp + self.sum_fn))
values.append(self.sum_tp / (self.sum_tp + self.sum_fp + 1e-6))
values.append(self.sum_tp / (self.sum_tp + self.sum_fn + 1e-6))
values.extend([sum_val for sum_val in
(self.sum_conf, self.sum_conf_box, self.sum_conf_nbox, self.sum_x, self.sum_y, self.sum_w,
self.sum_h, self.sum_loss)])
return names, values | {
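# Minimal usage sketch (assumed, not part of the original file): the metric and
# the TensorBoard callback above are meant to be handed to an MXNet training
# loop, e.g.
#
#     metric = LossMetric(conf_threshold=0.85)
#     tb_callback = LogMetricsCallback(logging_dir='./logs', prefix='train')
#     module.fit(train_iter, eval_metric=metric, batch_end_callback=tb_callback)
#
# where `module` is an mx.mod.Module and `train_iter` a data iterator that the
# caller is assumed to provide.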
"content_hash": "bafd884cdf1740aea2fe44611d5c673c",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 116,
"avg_line_length": 36.891666666666666,
"alnum_prop": 0.531737067991868,
"repo_name": "BitTiger-MP/DS502-AI-Engineer",
"id": "9d5225745e46c72a4ab07174d199ce3ceb526820",
"size": "4427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DS502-1704/MXNet-week2-part1/metric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Jupyter Notebook",
"bytes": "1934109"
},
{
"name": "Python",
"bytes": "112398"
},
{
"name": "Shell",
"bytes": "8376"
}
],
"symlink_target": ""
} |
"""Functional test for GradientDescent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class GradientDescentOptimizerTest(tf.test.TestCase):
def testBasic(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(zip(
[grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
def testTensorLearningRate(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
lrate = tf.constant(3.0)
sgd_op = tf.train.GradientDescentOptimizer(lrate).apply_gradients(zip(
[grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
def testGradWrtRef(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
opt = tf.train.GradientDescentOptimizer(3.0)
values = [1.0, 3.0]
vars_ = [tf.Variable([v], dtype=dtype) for v in values]
grads_and_vars = opt.compute_gradients(vars_[0].ref() + vars_[1], vars_)
tf.initialize_all_variables().run()
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.0], grad.eval())
def testWithGlobalStep(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
global_step = tf.Variable(0, trainable=False)
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]),
global_step=global_step)
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params and global_step
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
self.assertAllCloseAccordingToType(1, global_step.eval())
def testSparseBasic(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = tf.IndexedSlices(
tf.constant([0.1], shape=[1, 1], dtype=dtype),
tf.constant([0]),
tf.constant([2, 1]))
grads1 = tf.IndexedSlices(
tf.constant([0.01], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]))
sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[1.0 - 3.0 * 0.1], [2.0]], var0.eval())
self.assertAllCloseAccordingToType(
[[3.0], [4.0 - 3.0 * 0.01]], var1.eval())
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "ff917033fc4701a5add64edc5bee72e2",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 42.865546218487395,
"alnum_prop": 0.589100176435993,
"repo_name": "HaebinShin/tensorflow",
"id": "67d4718adee626f02d04b71caeae9e86fa1e29fe",
"size": "5791",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/gradient_descent_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "176349"
},
{
"name": "C++",
"bytes": "10558866"
},
{
"name": "CMake",
"bytes": "34638"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "865714"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10609"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "20930"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45677"
},
{
"name": "Protocol Buffer",
"bytes": "118214"
},
{
"name": "Python",
"bytes": "8858431"
},
{
"name": "Shell",
"bytes": "234426"
},
{
"name": "TypeScript",
"bytes": "428153"
}
],
"symlink_target": ""
} |
"""
Created on Mon May 30 15:51:22 2016
Constellation object.
@author: Forrest
"""
import numpy as np
import pandas as pd
import struct
import skyfield.api
from skyfield.units import Angle
from astropy import units as u
import csv
class Constellation:
"""Constellation object which holds constellation name, abbreviation, and
RA/DEC for plotting."""
def __init__(self, observation, obstime, file):
self.obsv = observation
self.obstime = obstime
self.file = file
self.abbrv = []
self.name = []
self.ra = []
self.dec = []
self.star = []
self._read_csv()
self._create_star_objs()
def _read_csv(self):
"""Read csv file of constellation info into Constellation object."""
with open(self.file) as f:
reader = csv.reader(filter(lambda row: row[0] != '#', f))
for row in reader:
self.abbrv.append(row[0])
self.name.append(row[1])
self.ra.append(row[2])
self.dec.append(row[3])
def _create_star_objs(self):
"""Create star objs for each Constellation for calc of alt/az."""
ra = [r.split() for r in self.ra]
dec = [d.split() for d in self.dec]
self.star = [skyfield.api.Star(ra_hours=(float(r[0]), float(r[1])),
dec_degrees=(float(d[0]), float(d[1])))
for r, d in zip(ra, dec)]
def return_vis_constellations(self):
"""Return constellations visible at obstime from obsv location."""
obscon = self.obsv.obs.at(self.obstime)
cons_dat = zip(self.abbrv, self.name, self.ra, self.dec, self.star)
        # Keep only constellations more than 5 degrees above the horizon at obstime
new_cons_dat = [(ab, na, ra, de, st) for ab, na, ra, de, st in cons_dat
if obscon.observe(st).apparent().altaz()[0].degrees > 5]
return zip(*new_cons_dat)
def return_cons_altaz(self, skyfield_stars):
"""Return constellation (alt, az) at obstime."""
alt = []
az = []
for s in skyfield_stars:
a, z, d = self.obsv.obs.at(self.obstime).observe(s).apparent().altaz()
alt.append(a.degrees)
az.append(z.degrees)
return (alt, az)
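# Usage sketch (assumed, not part of the original file): `observation` is expected
# to expose a skyfield observer as `.obs`, `obstime` is a skyfield Time, and the
# csv file follows the format read by _read_csv above, e.g.
#
#     cons = Constellation(observation, obstime, 'constellations.csv')
#     abbrv, name, ra, dec, stars = cons.return_vis_constellations()
#     alt, az = cons.return_cons_altaz(stars)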
| {
"content_hash": "08702f0872b061245ec051e40ca47c5d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 82,
"avg_line_length": 29.5,
"alnum_prop": 0.5601912212081703,
"repo_name": "EP-Guy/VisPe",
"id": "c0f50d9cf5f385ad31353c605ee0693599fc5e58",
"size": "2325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispe/Constellation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12845"
}
],
"symlink_target": ""
} |
import fastpinball
ports = [('\\.\COM10', 921600), ('\\.\COM11', 921600), ('\\.\COM12', 921600)] # dmd, net, led
portAssignments = (1, 0, 2)
dev = fastpinball.fpOpen(ports, portAssignments)
fastpinball.fpTimerConfig(dev, 100000)
fast_events = fastpinball.fpGetEventObject()
tick = 0
while True:
fastpinball.fpEventPoll(dev, fast_events)
event = fastpinball.fpGetEventType(fast_events)
print event
if event == fastpinball.FP_EVENT_TYPE_TIMER_TICK:
tick += 1
print tick
| {
"content_hash": "29a071daadd2cf1900f35b69eadfc62f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 94,
"avg_line_length": 25.35,
"alnum_prop": 0.6765285996055227,
"repo_name": "spierepf/mpf",
"id": "f30e41bf1482f963e613fd77b99319cf434f6a7c",
"size": "507",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/fast_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1839"
},
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "1685472"
},
{
"name": "Shell",
"bytes": "729"
}
],
"symlink_target": ""
} |
"""Unit tests for the Quobyte volume driver module."""
import os
import mock
from oslo_concurrency import processutils
from oslo_utils import fileutils
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import quobyte
class QuobyteTestCase(test.NoDBTestCase):
"""Tests the nova.virt.libvirt.volume.quobyte module utilities."""
@mock.patch.object(fileutils, "ensure_tree")
@mock.patch.object(utils, "execute")
def test_quobyte_mount_volume(self, mock_execute, mock_ensure_tree):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
quobyte.mount_volume(quobyte_volume, export_mnt_base)
mock_ensure_tree.assert_called_once_with(export_mnt_base)
expected_commands = [mock.call('mount.quobyte',
quobyte_volume,
export_mnt_base,
check_exit_code=[0, 4])
]
mock_execute.assert_has_calls(expected_commands)
@mock.patch.object(fileutils, "ensure_tree")
@mock.patch.object(utils, "execute")
def test_quobyte_mount_volume_with_config(self,
mock_execute,
mock_ensure_tree):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
config_file_dummy = "/etc/quobyte/dummy.conf"
quobyte.mount_volume(quobyte_volume,
export_mnt_base,
config_file_dummy)
mock_ensure_tree.assert_called_once_with(export_mnt_base)
expected_commands = [mock.call('mount.quobyte',
quobyte_volume,
export_mnt_base,
'-c',
config_file_dummy,
check_exit_code=[0, 4])
]
mock_execute.assert_has_calls(expected_commands)
@mock.patch.object(fileutils, "ensure_tree")
@mock.patch.object(utils, "execute",
side_effect=(processutils.
ProcessExecutionError))
def test_quobyte_mount_volume_fails(self, mock_execute, mock_ensure_tree):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
self.assertRaises(processutils.ProcessExecutionError,
quobyte.mount_volume,
quobyte_volume,
export_mnt_base)
@mock.patch.object(utils, "execute")
def test_quobyte_umount_volume(self, mock_execute):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
quobyte.umount_volume(export_mnt_base)
mock_execute.assert_called_once_with('umount.quobyte',
export_mnt_base)
@mock.patch.object(quobyte.LOG, "error")
@mock.patch.object(utils, "execute")
    def test_quobyte_umount_volume_warns(self,
                                         mock_execute,
                                         mock_error):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
def exec_side_effect(*cmd, **kwargs):
exerror = processutils.ProcessExecutionError(
"Device or resource busy")
raise exerror
mock_execute.side_effect = exec_side_effect
quobyte.umount_volume(export_mnt_base)
        (mock_error.
         assert_called_once_with("The Quobyte volume at %s is still in use.",
                                 export_mnt_base))
@mock.patch.object(quobyte.LOG, "exception")
@mock.patch.object(utils, "execute",
side_effect=(processutils.ProcessExecutionError))
def test_quobyte_umount_volume_fails(self,
mock_execute,
mock_exception):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
quobyte.umount_volume(export_mnt_base)
(mock_exception.
assert_called_once_with("Couldn't unmount "
"the Quobyte Volume at %s",
export_mnt_base))
@mock.patch.object(os, "access", return_value=True)
@mock.patch.object(utils, "execute")
def test_quobyte_is_valid_volume(self, mock_execute, mock_access):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
quobyte.validate_volume(export_mnt_base)
mock_execute.assert_called_once_with('getfattr',
'-n',
'quobyte.info',
export_mnt_base)
@mock.patch.object(utils, "execute",
side_effect=(processutils.
ProcessExecutionError))
def test_quobyte_is_valid_volume_vol_not_valid_volume(self, mock_execute):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
self.assertRaises(exception.NovaException,
quobyte.validate_volume,
export_mnt_base)
@mock.patch.object(os, "access", return_value=False)
@mock.patch.object(utils, "execute",
side_effect=(processutils.
ProcessExecutionError))
def test_quobyte_is_valid_volume_vol_no_valid_access(self,
mock_execute,
mock_access):
mnt_base = '/mnt'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
self.assertRaises(exception.NovaException,
quobyte.validate_volume,
export_mnt_base)
class LibvirtQuobyteVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
"""Tests the LibvirtQuobyteVolumeDriver class."""
@mock.patch.object(quobyte, 'validate_volume')
@mock.patch.object(quobyte, 'mount_volume')
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
def test_libvirt_quobyte_driver_mount(self,
mock_is_mounted,
mock_mount_volume,
mock_validate_volume
):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = 'quobyte://192.168.1.1/volume-00001'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
mock_mount_volume.assert_called_once_with(quobyte_volume,
export_mnt_base,
mock.ANY)
mock_validate_volume.assert_called_with(export_mnt_base)
@mock.patch.object(quobyte, 'validate_volume')
@mock.patch.object(quobyte, 'umount_volume')
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=True)
def test_libvirt_quobyte_driver_umount(self, mock_is_mounted,
mock_umount_volume,
mock_validate_volume):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = 'quobyte://192.168.1.1/volume-00001'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
mock_validate_volume.assert_called_once_with(export_mnt_base)
mock_umount_volume.assert_called_once_with(export_mnt_base)
@mock.patch.object(quobyte, 'validate_volume')
@mock.patch.object(quobyte, 'umount_volume')
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=True)
def test_libvirt_quobyte_driver_already_mounted(self,
mock_is_mounted,
mock_umount_volume,
mock_validate_volume
):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = 'quobyte://192.168.1.1/volume-00001'
quobyte_volume = '192.168.1.1/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
mock_umount_volume.assert_called_once_with(export_mnt_base)
mock_validate_volume.assert_called_once_with(export_mnt_base)
@mock.patch.object(quobyte, 'validate_volume')
@mock.patch.object(quobyte, 'mount_volume')
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
def test_libvirt_quobyte_driver_qcow2(self, mock_is_mounted,
mock_mount_volume,
mock_validate_volume
):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = 'quobyte://192.168.1.1/volume-00001'
name = 'volume-00001'
image_format = 'qcow2'
quobyte_volume = '192.168.1.1/volume-00001'
connection_info = {'data': {'export': export_string,
'name': name,
'format': image_format}}
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(quobyte_volume))
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('file', tree.get('type'))
self.assertEqual('qcow2', tree.find('./driver').get('type'))
(mock_mount_volume.
assert_called_once_with('192.168.1.1/volume-00001',
export_mnt_base,
mock.ANY))
mock_validate_volume.assert_called_with(export_mnt_base)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=True)
def test_libvirt_quobyte_driver_mount_non_quobyte_volume(self,
mock_is_mounted):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = 'quobyte://192.168.1.1/volume-00001'
connection_info = {'data': {'export': export_string,
'name': self.name}}
def exe_side_effect(*cmd, **kwargs):
if cmd == mock.ANY:
raise exception.NovaException()
with mock.patch.object(quobyte,
'validate_volume') as mock_execute:
mock_execute.side_effect = exe_side_effect
self.assertRaises(exception.NovaException,
libvirt_driver.connect_volume,
connection_info,
self.disk_info,
mock.sentinel.instance)
def test_libvirt_quobyte_driver_normalize_export_with_protocol(self):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = 'quobyte://192.168.1.1/volume-00001'
self.assertEqual("192.168.1.1/volume-00001",
libvirt_driver._normalize_export(export_string))
def test_libvirt_quobyte_driver_normalize_export_without_protocol(self):
mnt_base = '/mnt'
self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_host)
export_string = '192.168.1.1/volume-00001'
self.assertEqual("192.168.1.1/volume-00001",
libvirt_driver._normalize_export(export_string))
| {
"content_hash": "d482bf8b1e0bc516fbe0cff5013d8e88",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 78,
"avg_line_length": 44.822535211267606,
"alnum_prop": 0.5356963298139769,
"repo_name": "rajalokan/nova",
"id": "d99dda0310946c3358473c0f5c9706ac1ae353f7",
"size": "16543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/volume/test_quobyte.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import yaml
from rospkg import RosPack, RosStack, ResourceNotFound
BASE_URL = 'https://github.com/ros/rosdistro/raw/master/rosdep/base.yaml'
PYTHON_URL = 'https://github.com/ros/rosdistro/raw/master/rosdep/python.yaml'
def get_test_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_test_tree_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'tree'))
def get_cache_dir():
p = os.path.join(get_test_dir(), 'sources_cache')
assert os.path.isdir(p)
return p
def create_test_SourcesListLoader():
from rosdep2.sources_list import SourcesListLoader
return SourcesListLoader.create_default(sources_cache_dir=get_cache_dir(), verbose=True)
def get_cache_raw():
cache_rosdep_path = os.path.join(get_cache_dir(), '0a12d6e7b0d47be9b76e7726720e4cb79528cbaa')
with open(cache_rosdep_path) as f:
cache_raw = yaml.safe_load(f.read())
return cache_raw
def get_cache_raw_python():
cache_rosdep_path = os.path.join(get_cache_dir(), 'f6f4ef95664e373cd4754501337fa217f5b55d91')
with open(cache_rosdep_path) as f:
cache_raw = yaml.safe_load(f.read())
return cache_raw
def get_test_rospkgs():
test_dir = get_test_tree_dir()
ros_root = os.path.join(test_dir, 'ros')
ros_package_path = os.path.join(test_dir, 'stacks')
catkin_package_path = os.path.join(test_dir, 'catkin')
ros_paths = [ros_root, ros_package_path, catkin_package_path]
rospack = RosPack(ros_paths=ros_paths)
rosstack = RosStack(ros_paths=ros_paths)
return rospack, rosstack
FAKE_TINYXML_RULE = """testtinyxml:
ubuntu:
lucid:
apt:
packages: libtinyxml-dev
debian: libtinyxml-dev
osx:
source:
uri: 'http://kforge.ros.org/rosrelease/viewvc/sourcedeps/tinyxml/tinyxml-2.6.2-1.rdmanifest'
md5sum: 13760e61e08c9004493c302a16966c42
fedora:
yum:
packages: tinyxml-devel"""
def test_RosdepDefinition():
from rosdep2.lookup import RosdepDefinition, ResolutionError, InvalidData
d = dict(a=1, b=2, c=3)
def1 = RosdepDefinition('d', d)
assert def1.rosdep_key == 'd'
assert def1.data == d
def2 = RosdepDefinition('d', d, 'file1.txt')
assert def2.rosdep_key == 'd'
assert def2.data == d
assert def2.origin == 'file1.txt'
# test get_rule_for_platform
# - test w/invalid data
try:
RosdepDefinition('dbad', 'foo', 'bad.txt').get_rule_for_platform('ubuntu', 'hardy', ['apt'], 'apt')
assert False, 'should have failed'
except InvalidData:
pass
try:
RosdepDefinition('dbad', {'ubuntu': 1}, 'bad2.txt').get_rule_for_platform('ubuntu', 'hardy', ['apt'], 'apt')
assert False, 'should have failed'
except InvalidData:
pass
try:
RosdepDefinition('dbad', {'ubuntu': {'hardy': 1}}, 'bad2.txt').get_rule_for_platform('ubuntu', 'hardy', ['apt'], 'apt')
assert False, 'should have failed'
except InvalidData:
pass
# - test w/valid data
d2 = yaml.safe_load(FAKE_TINYXML_RULE)['testtinyxml']
definition = RosdepDefinition('d2', d2, 'file2.txt')
# - tripwire
str(definition)
val = definition.get_rule_for_platform('fedora', 'fake-version', ['yum', 'source', 'pip'], 'yum')
assert val == ('yum', dict(packages='tinyxml-devel')), val
val = definition.get_rule_for_platform('debian', 'sid', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', 'libtinyxml-dev')
val = definition.get_rule_for_platform('ubuntu', 'lucid', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', dict(packages='libtinyxml-dev')), val
val = definition.get_rule_for_platform('osx', 'snow', ['macports', 'source', 'pip'], 'macports')
assert val == ('source', dict(md5sum='13760e61e08c9004493c302a16966c42', uri='http://kforge.ros.org/rosrelease/viewvc/sourcedeps/tinyxml/tinyxml-2.6.2-1.rdmanifest')), val
# test bad resolutions
try:
val = definition.get_rule_for_platform('ubuntu', 'hardy', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'd2'
assert e.rosdep_data == d2
assert e.os_name == 'ubuntu'
assert e.os_version == 'hardy'
# tripwire
str(e)
try:
val = definition.get_rule_for_platform('fakeos', 'fakeversion', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'd2'
assert e.rosdep_data == d2
assert e.os_name == 'fakeos'
assert e.os_version == 'fakeversion'
# tripwire
str(e)
definition = RosdepDefinition('testtinyxml', {'ubuntu': {'lucid': None, 'precise': ['libtinyxml-dev'], '*': ['libtinyxml2-dev']}}, 'wildcard.txt')
try:
val = definition.get_rule_for_platform('ubuntu', 'lucid', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'testtinyxml'
assert e.os_name == 'ubuntu'
assert e.os_version == 'lucid'
# tripwire
str(e)
val = definition.get_rule_for_platform('ubuntu', 'precise', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', ['libtinyxml-dev']), val
val = definition.get_rule_for_platform('ubuntu', 'trusty', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', ['libtinyxml2-dev']), val
definition = RosdepDefinition('trusty_only_key', {'ubuntu': {'*': None, 'trusty': ['trusty_only_pkg']}, 'debian': None}, 'wildcard.txt')
try:
val = definition.get_rule_for_platform('ubuntu', 'lucid', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'trusty_only_key'
assert e.os_name == 'ubuntu'
assert e.os_version == 'lucid'
# tripwire
str(e)
try:
val = definition.get_rule_for_platform('debian', 'stretch', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'trusty_only_key'
assert e.os_name == 'debian'
assert e.os_version == 'stretch'
# tripwire
str(e)
definition = RosdepDefinition('invalid_os_wildcard', {'*': ['pytest']}, 'os_wildcard.txt')
try:
val = definition.get_rule_for_platform('debian', 'sid', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except InvalidData:
pass
definition = RosdepDefinition('non_debian_key', {'debian': None, '*': {'pip': ['pytest']}}, 'os_wildcard.txt')
try:
val = definition.get_rule_for_platform('debian', 'sid', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'non_debian_key'
assert e.os_name == 'debian'
assert e.os_version == 'sid'
# tripwire
str(e)
# package manager not supported
try:
val = definition.get_rule_for_platform('ubuntu', 'precise', ['apt', 'source'], 'apt')
assert False, 'should have raised: %s' % (str(val))
except ResolutionError as e:
assert e.rosdep_key == 'non_debian_key'
assert e.os_name == 'ubuntu'
assert e.os_version == 'precise'
# tripwire
str(e)
val = definition.get_rule_for_platform('ubuntu', 'precise', ['apt', 'source', 'pip'], 'apt')
assert val == ('pip', ['pytest']), val
val = definition.get_rule_for_platform('fedora', '35', ['dnf', 'source', 'pip'], 'dnf')
assert val == ('pip', ['pytest']), val
# test reverse merging OS things (first is default)
definition = RosdepDefinition('test', {'debian': 'libtest-dev'}, 'fake-1.txt')
# rule should work as expected before reverse-merge
val = definition.get_rule_for_platform('debian', 'sid', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', 'libtest-dev'), val
try:
val = definition.get_rule_for_platform('ubuntu', 'precise', ['apt', 'source', 'pip'], 'apt')
assert False, 'should have failed'
except ResolutionError as e:
assert e.rosdep_key == 'test'
assert e.os_name == 'ubuntu'
assert e.os_version == 'precise'
# tripwire?
str(e)
definition.reverse_merge({'ubuntu': {'precise': {'apt': 'ros-fuerte-test'}}}, 'fake-gbp.yaml')
val = definition.get_rule_for_platform('ubuntu', 'precise', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', 'ros-fuerte-test'), val
val = definition.get_rule_for_platform('debian', 'sid', ['apt', 'source', 'pip'], 'apt')
assert val == ('apt', 'libtest-dev'), val
def test_RosdepView_merge():
from rosdep2.model import RosdepDatabaseEntry
from rosdep2.lookup import RosdepView
# rosdep data must be dictionary of dictionaries
data = dict(a=dict(x=1), b=dict(y=2), c=dict(z=3))
# create empty view and test
view = RosdepView('common')
assert len(view.keys()) == 0
# - tripwire
str(view)
# make sure lookups fail if not found
try:
view.lookup('notfound')
assert False, 'should have raised KeyError'
except KeyError as e:
assert 'notfound' in str(e)
# merge into empty view
d = RosdepDatabaseEntry(data, [], 'origin')
view.merge(d)
assert set(view.keys()) == set(data.keys())
for k, v in data.items():
assert view.lookup(k).data == v, '%s vs. %s' % (view.lookup(k), v)
# merge exact same data
d2 = RosdepDatabaseEntry(data, [], 'origin2')
view.merge(d2)
assert set(view.keys()) == set(data.keys())
for k, v in data.items():
assert view.lookup(k).data == v
# merge new for 'd', 'e'
d3 = RosdepDatabaseEntry(dict(d=dict(o=4), e=dict(p=5)), [], 'origin3')
view.merge(d3)
assert set(view.keys()) == set(list(data.keys()) + ['d', 'e'])
for k, v in data.items():
assert view.lookup(k).data == v
assert view.lookup('d').data == dict(o=4)
assert view.lookup('e').data == dict(p=5)
# merge different data for 'a'
d4 = RosdepDatabaseEntry(dict(a=dict(x=2)), [], 'origin4')
# - first w/o override, should not bump
view.merge(d4, override=False)
assert view.lookup('a').data == dict(x=1), view.lookup('a').data
assert view.lookup('b').data == dict(y=2)
assert view.lookup('c').data == dict(z=3)
assert view.lookup('d').data == dict(o=4)
assert view.lookup('e').data == dict(p=5)
# - now w/ override
view.merge(d4, override=True)
assert view.lookup('a').data == dict(x=2)
assert view.lookup('b').data == dict(y=2)
assert view.lookup('c').data == dict(z=3)
assert view.lookup('d').data == dict(o=4)
assert view.lookup('e').data == dict(p=5)
# - tripwire
str(view)
def test_RosdepLookup_get_rosdeps():
from rosdep2.loader import RosdepLoader
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
assert lookup.get_loader() is not None
assert isinstance(lookup.get_loader(), RosdepLoader)
print(lookup.get_rosdeps('empty_package'))
assert lookup.get_rosdeps('empty_package') == []
try:
assert lookup.get_rosdeps('not a resource') == []
assert False, 'should have raised'
except ResourceNotFound:
pass
print(lookup.get_rosdeps('stack1_p1'))
assert set(lookup.get_rosdeps('stack1_p1')) == set(['stack1_dep1', 'stack1_p1_dep1', 'stack1_p1_dep2'])
assert set(lookup.get_rosdeps('stack1_p1', implicit=False)) == set(['stack1_dep1', 'stack1_p1_dep1', 'stack1_p1_dep2'])
print(lookup.get_rosdeps('stack1_p2'))
assert set(lookup.get_rosdeps('stack1_p2', implicit=False)) == set(['stack1_dep1', 'stack1_dep2', 'stack1_p2_dep1']), set(lookup.get_rosdeps('stack1_p2'))
assert set(lookup.get_rosdeps('stack1_p2', implicit=True)) == set(['stack1_dep1', 'stack1_dep2', 'stack1_p1_dep1', 'stack1_p1_dep2', 'stack1_p2_dep1']), set(lookup.get_rosdeps('stack1_p2'))
# catkin
print(lookup.get_rosdeps('simple_catkin_package'))
assert set(lookup.get_rosdeps('simple_catkin_package')) == set(['catkin', 'testboost'])
assert set(lookup.get_rosdeps('simple_catkin_package', implicit=False)) == set(['catkin', 'testboost'])
print(lookup.get_rosdeps('another_catkin_package'))
assert set(lookup.get_rosdeps('another_catkin_package')) == set(['catkin', 'simple_catkin_package']) # implicit deps won't get included
assert set(lookup.get_rosdeps('another_catkin_package', implicit=False)) == set(['catkin', 'simple_catkin_package'])
print(lookup.get_rosdeps('metapackage_with_deps'))
assert set(lookup.get_rosdeps('metapackage_with_deps')) == set(['catkin', 'simple_catkin_package', 'another_catkin_package']) # implicit deps won't get included
assert set(lookup.get_rosdeps('metapackage_with_deps', implicit=False)) == set(['catkin', 'simple_catkin_package', 'another_catkin_package'])
def test_RosdepLookup_dependency_types():
from rosdep2.loader import RosdepLoader
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
default_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
buildtool_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['buildtool'])
build_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['build'])
build_export_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['build_export'])
exec_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['exec'])
test_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['test'])
doc_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['doc'])
mix_lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader, dependency_types=['build', 'build_export'])
buildtool_deps = ['catkin']
build_deps = ['testboost', 'eigen']
build_export_deps = ['eigen', 'testtinyxml']
exec_deps = ['eigen', 'testlibtool']
test_deps = ['curl']
doc_deps = ['epydoc']
default_deps = buildtool_deps + build_deps + build_export_deps + exec_deps + test_deps
assert set(buildtool_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(buildtool_deps)
assert set(build_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(build_deps)
assert set(build_export_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(build_export_deps)
assert set(exec_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(exec_deps)
assert set(test_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(test_deps)
assert set(mix_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(build_deps + build_export_deps)
assert set(default_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(default_deps)
assert set(doc_lookup.get_rosdeps('multi_dep_type_catkin_package')) == set(doc_deps)
def test_RosdepLookup_get_resources_that_need():
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
assert lookup.get_resources_that_need('fake') == []
assert set(lookup.get_resources_that_need('stack1_dep1')) == set(['stack1_p1', 'stack1_p2'])
assert lookup.get_resources_that_need('stack1_dep2') == ['stack1_p2']
assert lookup.get_resources_that_need('stack1_p1_dep1') == ['stack1_p1']
def test_RosdepLookup_create_from_rospkg():
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
# these are just tripwire, can't actually test as it depends on external env
lookup = RosdepLookup.create_from_rospkg()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack)
assert rospack == lookup.loader._rospack
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack)
assert rospack == lookup.loader._rospack
assert rosstack == lookup.loader._rosstack
def test_RosdepLookup_get_rosdep_view_for_resource():
from rosdep2.lookup import RosdepLookup
from rosdep2.rospkg_loader import DEFAULT_VIEW_KEY, RosPkgLoader
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
# assumption of our tests
assert isinstance(lookup.loader, RosPkgLoader)
# depends on nothing
cache_raw = get_cache_raw()
py_cache_raw = get_cache_raw_python()
# - first pass: no cache
ros_view = lookup.get_rosdep_view_for_resource('roscpp_fake')
libtool = ros_view.lookup('testlibtool')
assert BASE_URL == libtool.origin
assert cache_raw['testlibtool'] == libtool.data
python = ros_view.lookup('testpython')
assert PYTHON_URL == python.origin
assert py_cache_raw['testpython'] == python.data
    # package not in stack, should return the default view
assert lookup.get_rosdep_view_for_resource('just_a_package').name is DEFAULT_VIEW_KEY
# meta-packages should return default view as well
assert lookup.get_rosdep_view_for_resource('metapackage_with_deps').name is DEFAULT_VIEW_KEY
def test_RosdepLookup_get_rosdep_view():
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
# depends on nothing
cache_raw = get_cache_raw()
py_cache_raw = get_cache_raw_python()
# - first pass: no cache
ros_view = lookup.get_rosdep_view('ros')
libtool = ros_view.lookup('testlibtool')
assert BASE_URL == libtool.origin
assert cache_raw['testlibtool'] == libtool.data
python = ros_view.lookup('testpython')
assert PYTHON_URL == python.origin
assert py_cache_raw['testpython'] == python.data, python.data
# - second pass: with cache
ros_view = lookup.get_rosdep_view('ros')
libtool = ros_view.lookup('testlibtool')
assert BASE_URL == libtool.origin
assert cache_raw['testlibtool'] == libtool.data
# depends on ros
stack1_view = lookup.get_rosdep_view('stack1')
stack1_rosdep_path = os.path.join(rosstack.get_path('stack1'), 'rosdep.yaml')
# - make sure ros data is available
libtool = stack1_view.lookup('testlibtool')
assert BASE_URL == libtool.origin
assert cache_raw['testlibtool'] == libtool.data
python = stack1_view.lookup('testpython')
assert PYTHON_URL == python.origin
assert py_cache_raw['testpython'] == python.data
def test_RosdepLookup_get_errors():
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
tree_dir = get_test_tree_dir()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
# shouldn't be any errors (yet)
assert lookup.get_errors() == []
# force errors
lookup._load_all_views(lookup.loader)
# TODO: force errors. Previous tests relied on bad stack views.
# Now we need a bad sources cache.
def test_RosdepLookup_get_views_that_define():
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
tree_dir = get_test_tree_dir()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
val = lookup.get_views_that_define('testboost')
assert len(val) == 1
entry = val[0]
assert entry == (BASE_URL, BASE_URL), entry
val = lookup.get_views_that_define('testpython')
assert len(val) == 1
entry = val[0]
assert entry == (PYTHON_URL, PYTHON_URL), entry
def test_RosdepLookup_resolve_all_errors():
from rosdep2.installers import InstallerContext
from rosdep2.lookup import RosdepLookup, ResolutionError
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
# the installer context has nothing in it, lookups will fail
installer_context = InstallerContext()
installer_context.set_os_override('ubuntu', 'lucid')
resolutions, errors = lookup.resolve_all(['rospack_fake'], installer_context)
assert 'rospack_fake' in errors
resolutions, errors = lookup.resolve_all(['not_a_resource'], installer_context)
assert 'not_a_resource' in errors, errors
def test_RosdepLookup_resolve_errors():
from rosdep2.installers import InstallerContext
from rosdep2.lookup import RosdepLookup, ResolutionError
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
# the installer context has nothing in it, lookups will fail
installer_context = InstallerContext()
installer_context.set_os_override('ubuntu', 'lucid')
try:
lookup.resolve('testtinyxml', 'rospack_fake', installer_context)
assert False, 'should have raised'
except ResolutionError as e:
assert 'Unsupported OS' in str(e), str(e)
try:
lookup.resolve('fakedep', 'rospack_fake', installer_context)
assert False, 'should have raised'
except ResolutionError as e:
assert 'Cannot locate rosdep definition' in str(e), str(e)
def test_RosdepLookup_resolve():
from rosdep2 import create_default_installer_context
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
installer_context = create_default_installer_context()
installer_context.set_os_override('ubuntu', 'lucid')
# repeat for caching
for count in range(0, 2):
installer_key, resolution, dependencies = lookup.resolve('testtinyxml', 'rospack_fake', installer_context)
assert 'apt' == installer_key
assert ['libtinyxml-dev'] == resolution
assert [] == dependencies
installer_key, resolution, dependencies = lookup.resolve('testboost', 'roscpp_fake', installer_context)
assert 'apt' == installer_key
assert ['libboost1.40-all-dev'] == resolution
assert [] == dependencies
installer_key, resolution, dependencies = lookup.resolve('testlibtool', 'roscpp_fake', installer_context)
assert 'apt' == installer_key
assert set(['libtool', 'libltdl-dev']) == set(resolution)
assert [] == dependencies
def test_RosdepLookup_resolve_all():
from rosdep2 import create_default_installer_context
from rosdep2.lookup import RosdepLookup
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack,
sources_loader=sources_loader)
installer_context = create_default_installer_context()
installer_context.set_os_override('ubuntu', 'lucid')
# repeat for caching
lookup.verbose = True
for count in range(0, 2):
resolutions, errors = lookup.resolve_all(['rospack_fake', 'roscpp_fake'], installer_context)
assert not errors, errors
installer_keys, resolveds = zip(*resolutions)
assert 'apt' in installer_keys
apt_resolutions = []
for k, v in resolutions:
if k == 'apt':
apt_resolutions.extend(v)
assert set(apt_resolutions) == set(['libtinyxml-dev', 'libboost1.40-all-dev', 'libtool', 'libltdl-dev']), set(apt_resolutions)
| {
"content_hash": "1d58c575a37cc59a39a71a76c6f81225",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 193,
"avg_line_length": 42.21147540983606,
"alnum_prop": 0.6423938793739563,
"repo_name": "ros-infrastructure/rosdep",
"id": "176abd0d00f5dbfcb81fb1a8fcb60ebf133c89c7",
"size": "27328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_rosdep_lookup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "466"
},
{
"name": "Python",
"bytes": "503079"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.comments.forms import CommentForm
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from threadedcomments.models import ThreadedComment
class ThreadedCommentForm(CommentForm):
parent = forms.IntegerField(required=False, widget=forms.HiddenInput)
def __init__(self, target_object, parent=None, data=None, initial=None):
self.base_fields.insert(
self.base_fields.keyOrder.index('comment'), 'title',
forms.CharField(label=_('Title'), required=False, max_length=getattr(settings, 'COMMENTS_TITLE_MAX_LENGTH', 255))
)
self.parent = parent
if initial is None:
initial = {}
initial.update({'parent': self.parent})
super(ThreadedCommentForm, self).__init__(target_object, data=data, initial=initial)
def get_comment_model(self):
return ThreadedComment
def get_comment_create_data(self):
d = super(ThreadedCommentForm, self).get_comment_create_data()
d['parent_id'] = self.cleaned_data['parent']
d['title'] = self.cleaned_data['title']
return d
| {
"content_hash": "4da4398d4c6e808394295852f3f9c3ad",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 125,
"avg_line_length": 38.86666666666667,
"alnum_prop": 0.6809605488850772,
"repo_name": "Afnarel/django-threadedcomments",
"id": "91c1300d5017974efcb797fe4ebb6226a78b3514",
"size": "1166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "threadedcomments/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import os.path as path
def getfiles(directory,extension=None):
if not os.path.isdir(directory):
        raise ValueError("not a valid directory: %s" % directory)
files = []
for f in os.listdir(directory):
if extension == None or f.endswith(extension):
files.append(path.abspath(path.join(directory,f)))
return files
def get_abs_path(p):
if path.isabs(p):
return p
else:
return path.abspath(path.join(os.getcwd(),p))
def makedirs(p,check=False):
if not check:
if not os.path.exists(p):
os.makedirs(p)
else:
os.makedirs(p)
| {
"content_hash": "bc169951673ac3a3bc88fb36b6d1d179",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 62,
"avg_line_length": 25.375,
"alnum_prop": 0.6157635467980296,
"repo_name": "yskflute/exputils",
"id": "8a9d4bc3062094d2ca9c7e644ccaf14d45d98faf",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exputils/pathutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23696"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
from collections import OrderedDict
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.generator.ninja as ninja_generator
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
from gyp.common import OrderedSet
PY3 = bytes != str
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')
generator_default_variables = {
'DRIVER_PREFIX': '',
'DRIVER_SUFFIX': '.sys',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_large_pdb',
'msvs_shard',
'msvs_external_builder',
'msvs_external_builder_out_dir',
'msvs_external_builder_build_cmd',
'msvs_external_builder_clean_cmd',
'msvs_external_builder_clcompile_cmd',
'msvs_enable_winrt',
'msvs_requires_importlibrary',
'msvs_enable_winphone',
'msvs_enable_marmasm',
'msvs_application_type_revision',
'msvs_target_platform_version',
'msvs_target_platform_minversion',
]
generator_filelist_paths = None
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
if PY3:
config = config.decode('utf-8')
username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
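# Illustrative example, worked by hand from the rule above: collapsing the
# '..' would also drop the '$(IntDir)' macro, so the '$' count changes and the
# original string is kept:
#   _NormalizedSource('$(IntDir)/../gen') -> '$(IntDir)/../gen'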
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
  if (fixpath_prefix and path and not os.path.isabs(path) and
      not path[0] == '$' and not _IsWindowsAbsPath(path)):
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _IsWindowsAbsPath(path):
"""
On Cygwin systems Python needs a little help determining if a path is an absolute Windows path or not, so that
it does not treat those as relative, which results in bad paths like:
'..\C:\<some path>\some_source_code_file.cc'
"""
return path.startswith('c:') or path.startswith('C:')
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
    sources: A list of source file paths, each already split into path components.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = OrderedDict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
elif msvs_version and not msvs_version.UsesVcxproj():
# For MSVS 2008 and earlier, we need to process all files before walking
# the sub folders.
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
else:
contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(s[0], contents=contents)
result.append(contents)
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
if msvs_version and msvs_version.UsesVcxproj():
return result
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
_ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if 'CompileAsWinRT' == setting:
return
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigTargetVersion(config_data):
return config_data.get('msvs_target_version', 'Windows7')
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
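# Illustrative examples, worked by hand from the helpers above (the config
# names and platform value are made up for illustration):
#   _ConfigFullName('Debug', {})                                          -> 'Debug|Win32'
#   _ConfigFullName('Debug_x64', {'msvs_configuration_platform': 'x64'})  -> 'Debug|x64'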
def _ConfigWindowsTargetPlatformVersion(config_data, version):
target_ver = config_data.get('msvs_windows_target_platform_version')
if target_ver and re.match(r'^\d+', target_ver):
return target_ver
config_ver = config_data.get('msvs_windows_sdk_version')
vers = [config_ver] if config_ver else version.compatible_sdks
for ver in vers:
for key in [
r'HKLM\Software\Microsoft\Microsoft SDKs\Windows\%s',
r'HKLM\Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows\%s']:
sdk_dir = MSVSVersion._RegistryGetValue(key % ver, 'InstallationFolder')
if not sdk_dir:
continue
version = MSVSVersion._RegistryGetValue(key % ver, 'ProductVersion') or ''
# Find a matching entry in sdk_dir\include.
      expected_sdk_dir = r'%s\include' % sdk_dir
names = sorted([x for x in (os.listdir(expected_sdk_dir)
if os.path.isdir(expected_sdk_dir)
else []
)
if x.startswith(version)], reverse=True)
if names:
return names[0]
else:
print('Warning: No include files found for detected '
'Windows SDK version %s' % (version), file=sys.stdout)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd, do_setup_env):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
# direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = ''
if do_setup_env:
cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
cmd += 'set CYGWIN=nontsec&& '
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Add call before command to ensure that commands can be tied together one
# after the other without aborting in Incredibuild, since IB makes a bat
# file out of the raw command string, and some commands (like python) are
# actually batch files themselves.
command.insert(0, 'call')
# Fix the paths
# TODO(quote): This is a really ugly heuristic, and will miss path fixing
# for arguments like "--arg=path" or "/opt:path".
# If the argument starts with a slash or dash, it's probably a command line
# switch
arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd, do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].items():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
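# Illustrative example, worked by hand from the substitutions above, using a
# made-up input_file of 'idl/foo.idl':
#   _RuleExpandPath('$(InputName).h', 'idl/foo.idl')               -> 'foo.h'
#   _RuleExpandPath('$(InputDir)/$(InputFileName)', 'idl/foo.idl') -> 'idl/foo.idl'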
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = OrderedSet()
outputs = OrderedSet()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
# Skip a rule with no action and no inputs.
if 'action' not in r and not r.get('rule_sources', []):
continue
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
do_setup_env=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = OrderedSet()
all_outputs = OrderedSet()
all_output_dirs = OrderedSet()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(OrderedSet(inputs))
all_outputs.update(OrderedSet(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running external rules for %s' %
spec['target_name'],
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
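# Illustrative example, worked by hand from the replacement above:
#   _EscapeEnvironmentVariableExpansion('50%') -> '50%%'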
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
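# Illustrative example, worked by hand from the regex above; the result is
# shown as raw characters rather than as a Python literal:
#   _EscapeCommandLineArgumentForMSVS('say "hi"') -> "say \"hi\""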
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s, file=sys.stderr)
return s
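# Illustrative example, worked by hand from the logic above; the unquoted
# comma is wrapped in VCProj-style quotes so it is not read as a separator:
#   _EscapeVCProjCommandLineArgListItem('-Ia,b') -> '-Ia","b'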
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
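# Illustrative example, worked by hand from the chain above; the define is
# quoted for the command line and the literal '#' is octal-encoded (raw
# characters shown, including the surrounding double quotes):
#   _EscapeCppDefineForMSVS('FOO#BAR') -> "FOO\043BAR"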
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
    return (len(match.group(1)) // 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
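# Illustrative example, worked by hand from the table above:
#   _EscapeMSBuildSpecialCharacters('$(var);50%') -> '%24(var)%3B50%25'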
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources, False)
def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
# Remove trigger_file from excluded_sources to let the rule be triggered
# (e.g. rule trigger ax_enums.idl is added to excluded_sources
# because it's also in an action's inputs in the same project)
excluded_sources.discard(_FixPath(trigger_file))
      # There is nothing more to do unless the rule's outputs are processed as sources.
if int(rule.get('process_outputs_as_sources', False)):
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = OrderedSet(_FixPaths(inputs))
outputs = OrderedSet(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
if not is_msbuild:
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
if spec['type'] == 'windows_driver':
toolset = 'WindowsKernelModeDriver10.0'
return toolset
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return []
if version.UsesVcxproj():
return _GenerateMSBuildProject(project, options, version, generator_flags)
else:
return _GenerateMSVSProject(project, options, version, generator_flags)
# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py.
def _ValidateSourcesForMSVSProject(spec, version):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
version: The VisualStudioVersion object.
"""
# This validation should not be applied to MSVC2010 and later.
assert not version.UsesVcxproj()
# TODO: Check if MSVC allows this for loadable_module targets.
if spec.get('type', None) not in ('static_library', 'shared_library'):
return
sources = spec.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.items():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' % spec['target_name']
+ error + 'MSVC08 cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above')
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
gyp.common.EnsureDirExists(project.path)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].items():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
  # MSVC08 and prior versions cannot handle duplicate basenames in the same
# target.
# TODO: Take excluded sources into consideration if possible.
_ValidateSourcesForMSVSProject(spec, version)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
sources, excluded_sources,
list_excluded, version))
# Add in files.
missing_sources = _VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
return missing_sources
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
# Gather list of unique platforms.
platforms = OrderedSet()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
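# Illustrative example (comment only): assuming _ConfigPlatform reads the
# configuration's 'msvs_configuration_platform' (defaulting to 'Win32'), a spec
# whose configurations are {'Debug': {}, 'Debug_x64':
# {'msvs_configuration_platform': 'x64'}} yields ['Win32', 'x64'], with
# duplicates collapsed while preserving first-seen order.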
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
    A string containing the configuration type, a number defined by Microsoft.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'windows_driver': '5', # .sys
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
  Many settings in a vcproj file are specific to a configuration. This function
  writes the main part of the vcproj file that is configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, midl_include_dirs, resource_include_dirs = \
_GetIncludeDirs(config)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(config)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
# Prevent default library inheritance from the environment.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCMIDLTool',
'AdditionalIncludeDirectories', midl_include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
midl_include_dirs = (
config.get('midl_include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
midl_include_dirs = _FixPaths(midl_include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, midl_include_dirs, resource_include_dirs
def _GetLibraryDirs(config):
"""Returns the list of directories to be used for library search paths.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
library_dirs = config.get('library_dirs', [])
library_dirs = _FixPaths(library_dirs)
return library_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub(r'^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
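# Illustrative example (comment only): for
#   spec = {'libraries': ['-lkernel32', 'ws2_32.lib', 'kernel32.lib']}
# the '-l' prefix is stripped, a missing extension becomes '.lib', and only the
# last occurrence of a duplicate survives, so the result is
#   ['ws2_32.lib', 'kernel32.lib']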
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
    spec: The target dictionary containing the properties of the target.
    msbuild: True when generating for MSBuild (.vcxproj) rather than .vcproj.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'windows_driver': ('VCLinkerTool', 'Link', '$(OutDir)', '.sys'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
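# Illustrative example (comment only): for spec = {'type': 'executable'} with no
# product overrides and msbuild=False, this returns roughly
#   ('$(OutDir)\$(ProjectName).exe', 'VCLinkerTool', 'Link')
# while a 'static_library' maps to the librarian tool and an '$(OutDir)lib\'
# output directory.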
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
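# Illustrative example (comment only): defines given as two-element lists are
# joined with '=', so config = {'defines': ['NDEBUG', ['FOO', 1]]} becomes
# ['NDEBUG', 'FOO=1'].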
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable',
'windows_driver']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.items():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.items():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
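# Illustrative example (comment only): list-valued settings are flattened before
# being handed to the vcproj writer, e.g.
#   {'VCCLCompilerTool': {'PreprocessorDefinitions': ['A', 'B'],
#                         'AdditionalOptions': ['/Zi', '/W4']}}
# becomes a single Tool whose settings read 'A;B' and '/Zi /W4' respectively
# (AdditionalOptions and the linker's AdditionalDependencies are joined with
# spaces; all other lists use semicolons).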
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
else:
intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources_set.update(_NormalizedSource(s) for s in sources_array)
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
    generator_flags: Dict of generator-specific flags.
    gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Whether excluded files should be listed in the project file.
    version: A MSVSVersion object.
Returns:
A trio of (list of sources, list of excluded sources,
    list of excluded IDL files)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded,
msvs_version=version)
# Prune filters with a single child to flatten ugly directory structures
# such as ../../src/modules/module1 etc.
if version.UsesVcxproj():
while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
and len(set([s.name for s in sources])) == 1:
assert all([len(s.contents) == 1 for s in sources])
sources = [s.contents[0] for s in sources]
else:
while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
sources = sources[0].contents
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].items():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.items():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].items():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].items():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = OrderedSet()
for _, config in spec['configurations'].items():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].items():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 for if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].items():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
# Don't setup_env every time. When all the actions are run together in one
# batch file in VS, the PATH will grow too long.
# Membership in this set means that the cygwin environment has been set up,
# and does not need to be set up again.
have_setup_env = set()
for a in actions:
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
attached_to = inputs[0]
need_setup_env = attached_to not in have_setup_env
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
do_setup_env=need_setup_env)
have_setup_env.add(attached_to)
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].items():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
fixed_dst = _FixPath(dst)
full_dst = '"%s\\%s\\"' % (fixed_dst, outer_dir)
cmd = 'mkdir %s 2>nul & cd "%s" && xcopy /e /f /y "%s" %s' % (
full_dst, _FixPath(base_dir), outer_dir, full_dst)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, fixed_dst)))
else:
fix_dst = _FixPath(cpy['destination'])
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
fix_dst, _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, fix_dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
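# Illustrative example (comment only): starting from root = {}, calling
# _GetPathDict(root, os.path.join('chrome', 'browser')) grows root into
# {'chrome': {'browser': {}}} and returns the innermost dict, which callers then
# populate with project entries.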
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.items():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
list(node)[0] == parent + '.vcproj'):
return node[list(node)[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
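# Illustrative example (comment only): a folder that contains nothing but the
# identically named project is lifted one level, e.g.
#   {'base': {'base.vcproj': project_obj}}  ->  {'base': project_obj}
# so the solution does not show a redundant '(base)' folder around base.vcproj.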
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[list(root)[0]]) == dict:
root = root[list(root)[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].items():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
    A dictionary of the created projects, keyed by qualified target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise GypError(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
proj_path,
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
# Set project toolset if any (MS build only)
if msvs_version.UsesVcxproj():
obj.set_msbuild_toolset(
_GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
projects[qualified_target] = obj
# Set all the dependencies, but not if we are using an external builder like
# ninja
for project in projects.values():
if not project.spec.get('msvs_external_builder'):
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def _InitNinjaFlavor(params, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
params: Params provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
gyp_dir = os.path.dirname(gyp_file)
configuration = '$(Configuration)'
if params.get('target_arch') == 'x64':
configuration += '_x64'
if params.get('target_arch') == 'arm64':
configuration += '_arm64'
spec['msvs_external_builder_out_dir'] = os.path.join(
gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
ninja_generator.ComputeOutputDir(params),
configuration)
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-tclean',
'$(ProjectName)',
]
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
if gyp.common.GetFlavor(params) == 'ninja':
default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
options = params['options']
msvs_version = params['msvs_version']
devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
for build_file, build_file_dict in data.items():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
sln_path = build_file_root + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
for config in configurations:
arguments = [devenv, sln_path, '/Build', config]
print('Building [%s]: %s' % (config, arguments))
rtn = subprocess.check_call(arguments)
def CalculateGeneratorInputInfo(params):
if params.get('flavor') == 'ninja':
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ninja_generator.ComputeOutputDir(params),
'gypfiles-msvs-ninja'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Dictionary of global generator parameters, including options.
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
# Optionally use the large PDB workaround for targets marked with
# 'msvs_large_pdb': 1.
(target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
# Optionally configure each spec to use ninja as the external builder.
if params.get('flavor') == 'ninja':
_InitNinjaFlavor(params, target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].items():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise GypError(error_message)
else:
print("Warning: " + error_message, file=sys.stdout)
def _GenerateMSBuildFiltersFile(filters_path, source_files,
rule_dependencies, extension_to_rule_name,
platforms):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
    rule_dependencies: A set of files that are additional inputs to rules.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    platforms: The list of platforms the project targets.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, rule_dependencies,
extension_to_rule_name, platforms,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
extension_to_rule_name, platforms,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
    rule_dependencies: A set of files that are additional inputs to rules.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    platforms: The list of platforms the project targets.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
rule_dependencies, extension_to_rule_name,
platforms, filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name,
platforms)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name, platforms):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
    rule_dependencies: A set of files that are additional inputs to rules.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    platforms: The list of platforms the project targets.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
ext = ext.lower()
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx', '.mm']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext in ['.s', '.asm']:
group = 'masm'
element = 'MASM'
for platform in platforms:
if platform.lower() in ['arm', 'arm64']:
element = 'MARMASM'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif source in rule_dependencies:
group = 'rule_dependency'
element = 'CustomBuild'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, rule_dependencies,
extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
rule_dependencies.update(msbuild_rule.additional_dependencies.split(';'))
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources, True)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
inputs: The name of the _inputs element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
    # Sanitize the rule name so it contains only letters, digits and underscores.
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.inputs = self.rule_name + '_inputs'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
do_setup_env=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
inputs_section = [
'ItemGroup',
[rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
read_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).read.1.tlog',
'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'EchoOff': 'true',
'StandardOutputImportance': 'High',
'StandardErrorImportance': 'High',
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
inputs_section,
logging_section,
message_section,
write_tlog_section,
read_tlog_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
# Generate the .xml file
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
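# Illustrative example (comment only): for the configuration name 'Debug_x64'
# with settings {'msvs_configuration_platform': 'x64'}, the pair is
# ('Debug', 'x64') and the MSBuild condition string is
#   "'$(Configuration)|$(Platform)'=='Debug|x64'"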
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.items()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
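# Illustrative example (comment only): the nested-list form above is what
# easy_xml later serializes; a single 'Debug'/'Win32' pair produces roughly
#   <ItemGroup Label="ProjectConfigurations">
#     <ProjectConfiguration Include="Debug|Win32">
#       <Configuration>Debug</Configuration>
#       <Platform>Win32</Platform>
#     </ProjectConfiguration>
#   </ItemGroup>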
def _GetMSBuildGlobalProperties(spec, version, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
properties = [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['IgnoreWarnCompileDuplicatedFilename', 'true'],
]
]
if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64':
properties[0].append(['PreferredToolArchitecture', 'x64'])
if spec.get('msvs_target_platform_version'):
target_platform_version = spec.get('msvs_target_platform_version')
properties[0].append(['WindowsTargetPlatformVersion',
target_platform_version])
if spec.get('msvs_target_platform_minversion'):
target_platform_minversion = spec.get('msvs_target_platform_minversion')
properties[0].append(['WindowsTargetPlatformMinVersion',
target_platform_minversion])
else:
properties[0].append(['WindowsTargetPlatformMinVersion',
target_platform_version])
if spec.get('msvs_enable_winrt'):
properties[0].append(['DefaultLanguage', 'en-US'])
properties[0].append(['AppContainerApplication', 'true'])
if spec.get('msvs_application_type_revision'):
app_type_revision = spec.get('msvs_application_type_revision')
properties[0].append(['ApplicationTypeRevision', app_type_revision])
else:
properties[0].append(['ApplicationTypeRevision', '8.1'])
if spec.get('msvs_enable_winphone'):
properties[0].append(['ApplicationType', 'Windows Phone'])
else:
properties[0].append(['ApplicationType', 'Windows Store'])
platform_name = None
msvs_windows_sdk_version = None
for configuration in spec['configurations'].values():
platform_name = platform_name or _ConfigPlatform(configuration)
msvs_windows_sdk_version = (msvs_windows_sdk_version or
_ConfigWindowsTargetPlatformVersion(configuration, version))
if platform_name and msvs_windows_sdk_version:
break
if msvs_windows_sdk_version:
properties[0].append(['WindowsTargetPlatformVersion',
str(msvs_windows_sdk_version)])
elif version.compatible_sdks:
raise GypError('%s requires any SDK of %s version, but none were found' %
(version.description, version.compatible_sdks))
if platform_name == 'ARM':
properties[0].append(['WindowsSDKDesktopARMSupport', 'true'])
return properties
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].items():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
config_type = msbuild_attributes.get('ConfigurationType')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
config_type)
if config_type == 'Driver':
_AddConditionalProperty(properties, condition, 'DriverType', 'WDM')
_AddConditionalProperty(properties, condition, 'TargetVersion',
_ConfigTargetVersion(settings))
if character_set:
      if 'msvs_enable_winrt' not in spec:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
additional_props = {}
props_specified = False
for name, settings in sorted(configurations.items()):
configuration = _GetConfigurationCondition(name, settings)
if 'msbuild_props' in settings:
additional_props[configuration] = _FixPaths(settings['msbuild_props'])
props_specified = True
else:
additional_props[configuration] = ''
if not props_specified:
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
else:
sheets = []
for condition, props in additional_props.items():
import_group = [
'ImportGroup',
{'Label': 'PropertySheets',
'Condition': condition
},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
for props_file in props:
import_group.append(['Import', {'Project':props_file}])
sheets.append(import_group)
return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
elif a == 'ConfigurationType':
msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
else:
print('Warning: Do not know how to convert MSVS attribute ' + a)
return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'5': 'Driver',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
if 'msbuild_configuration_attributes' not in config:
msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
else:
config_type = _GetMSVSConfigurationType(spec, build_file)
config_type = _ConvertMSVSConfigurationType(config_type)
msbuild_attributes = config.get('msbuild_configuration_attributes', {})
msbuild_attributes.setdefault('ConfigurationType', config_type)
output_dir = msbuild_attributes.get('OutputDirectory',
'$(SolutionDir)$(Configuration)')
msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in msbuild_attributes:
intermediate = _FixPath('$(Configuration)') + '\\'
msbuild_attributes['IntermediateDirectory'] = intermediate
if 'CharacterSet' in msbuild_attributes:
msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
msbuild_attributes['CharacterSet'])
if 'TargetName' not in msbuild_attributes:
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
msbuild_attributes['TargetName'] = target_name
if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
ext = spec.get('product_extension')
msbuild_attributes['TargetExt'] = '.' + ext
if spec.get('msvs_external_builder'):
external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
# Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
# (depending on the tool used) to avoid MSB8012 warning.
msbuild_tool_map = {
'executable': 'Link',
'shared_library': 'Link',
'loadable_module': 'Link',
'windows_driver': 'Link',
'static_library': 'Lib',
}
msbuild_tool = msbuild_tool_map.get(spec['type'])
if msbuild_tool:
msbuild_settings = config['finalized_msbuild_settings']
out_file = msbuild_settings[msbuild_tool].get('OutputFile')
if out_file:
msbuild_attributes['TargetPath'] = _FixPath(out_file)
target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
if target_ext:
msbuild_attributes['TargetExt'] = target_ext
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.items()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
_AddConditionalProperty(properties, condition, 'TargetName',
attributes['TargetName'])
if 'TargetExt' in attributes:
_AddConditionalProperty(properties, condition, 'TargetExt',
attributes['TargetExt'])
if attributes.get('TargetPath'):
_AddConditionalProperty(properties, condition, 'TargetPath',
attributes['TargetPath'])
if attributes.get('TargetExt'):
_AddConditionalProperty(properties, condition, 'TargetExt',
attributes['TargetExt'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.items()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
    the value a list of conditions for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
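# Illustrative sketch (hypothetical names and values) of the dictionary shape
# _AddConditionalProperty builds: properties[name][value] collects every
# configuration condition under which that value applies, so identical values
# across configurations collapse into a single entry with several conditions.
#
#   props = {}
#   _AddConditionalProperty(props, "'$(Configuration)'=='Debug'", 'TargetName', 'foo')
#   _AddConditionalProperty(props, "'$(Configuration)'=='Release'", 'TargetName', 'foo')
#   # props == {'TargetName': {'foo': ["'$(Configuration)'=='Debug'",
#   #                                  "'$(Configuration)'=='Release'"]}}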
# Regex for msvs variable references ( i.e. $(FOO) ).
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
    the value a list of conditions for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
edges = set()
for value in sorted(properties[node].keys()):
# Add to edges all $(...) references to variables.
#
      # Variable references that refer to names not in properties are excluded.
      # These can exist, for instance, to refer to built-in definitions like
      # $(SolutionDir).
#
      # Self-references are ignored; a self-reference is used in a few places
      # to append to the default value, e.g. PATH=$(PATH);other_path.
edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
if v in properties and v != node]))
return edges
properties_ordered = gyp.common.TopologicallySorted(
properties.keys(), GetEdges)
# Walk properties in the reverse of a topological sort on
# user_of_variable -> used_variable as this ensures variables are
# defined before they are used.
# NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
for name in reversed(properties_ordered):
values = properties[name]
for value, conditions in sorted(values.items()):
if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
# just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
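# Rough example (hypothetical data, assuming a spec with two configurations) of
# the PropertyGroup produced above: a value shared by every configuration is
# emitted once without a condition, while per-configuration values keep one
# entry per condition.
#
#   props = {'TargetName': {'foo': ["'$(Configuration)'=='Debug'",
#                                   "'$(Configuration)'=='Release'"]}}
#   _GetMSBuildPropertyGroup(spec, None, props)
#   # => [['PropertyGroup', ['TargetName', 'foo']]]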
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.items()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.items()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.items()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, midl_include_dirs, resource_include_dirs = \
_GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(configuration)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
target_ext = _GetOutputTargetExt(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
prebuild = configuration.get('msvs_prebuild')
postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'Midl',
'AdditionalIncludeDirectories', midl_include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries, note that even for empty libraries, we want this
# set, to prevent inheriting default libraries from the environment.
_ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
libraries)
_ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
if target_ext:
_ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', [precompiled_header])
else:
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing')
# Turn off WinRT compilation
_ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false')
# Turn on import libraries if appropriate
if spec.get('msvs_requires_importlibrary'):
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false')
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
if prebuild:
_ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
if postbuild:
_ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
    # For some settings, VS2010 does not automatically extend the settings
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
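# Rough examples (hypothetical inputs) of the formatting rules above: list
# values are joined with ';' and gain an MSBuild inheritance marker, except for
# the AdditionalOptions settings, which are joined with spaces.
#
#   _GetValueFormattedForMSBuild('ClCompile', 'PreprocessorDefinitions', ['FOO', 'BAR'])
#   # => 'FOO;BAR;%(PreprocessorDefinitions)'
#   _GetValueFormattedForMSBuild('Link', 'AdditionalOptions', ['/DEBUG', '/MAP'])
#   # => '/DEBUG /MAP %(AdditionalOptions)'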
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
"""
missing_sources = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
missing_sources.append(full_path)
return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies,
extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded):
groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule',
'rule_dependency']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
rule_dependencies, extension_to_rule_name,
sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
rule_dependencies, extension_to_rule_name,
sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
rule_dependencies, extension_to_rule_name,
sources_handled_by_action,
list_excluded)
else:
      if source not in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].items():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name,
_GetUniquePlatforms(spec))
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).values():
if config.get('msvs_use_library_dependency_inputs', 0):
project_ref.append(['UseLibraryDependencyInputs', 'true'])
break
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
gyp.common.EnsureDirExists(project.path)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
rule_dependencies = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
# Don't generate rules if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, rule_dependencies,
extension_to_rule_name)
else:
rules = spec.get('rules', [])
_AdjustSourcesForRules(rules, sources, excluded_sources, True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded, version))
# Don't add actions if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
rule_dependencies,
extension_to_rule_name, _GetUniquePlatforms(spec))
missing_sources = _VerifySourcesExist(sources, project_dir)
for configuration in configurations.values():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
import_masm_props_section = [
['Import',
{'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]]
import_masm_targets_section = [
['Import',
{'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]]
import_marmasm_props_section = [
['Import',
{'Project': r'$(VCTargetsPath)\BuildCustomizations\marmasm.props'}]]
import_marmasm_targets_section = [
['Import',
{'Project': r'$(VCTargetsPath)\BuildCustomizations\marmasm.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, version, project.guid,
project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
if spec.get('msvs_enable_winphone'):
content += _GetMSBuildLocalProperties('v120_wp81')
else:
content += _GetMSBuildLocalProperties(project.msbuild_toolset)
content += import_cpp_props_section
content += import_masm_props_section
if spec.get('msvs_enable_marmasm'):
content += import_marmasm_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, rule_dependencies, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += import_masm_targets_section
if spec.get('msvs_enable_marmasm'):
content += import_marmasm_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
if spec.get('msvs_external_builder'):
content += _GetMSBuildExternalBuilderTargets(spec)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
"""Return a list of MSBuild targets for external builders.
The "Build" and "Clean" targets are always generated. If the spec contains
'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
be generated, to support building selected C/C++ files.
Arguments:
spec: The gyp target spec.
Returns:
List of MSBuild 'Target' specs.
"""
build_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_build_cmd'],
False, False, False, False)
build_target = ['Target', {'Name': 'Build'}]
build_target.append(['Exec', {'Command': build_cmd}])
clean_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_clean_cmd'],
False, False, False, False)
clean_target = ['Target', {'Name': 'Clean'}]
clean_target.append(['Exec', {'Command': clean_cmd}])
targets = [build_target, clean_target]
if spec.get('msvs_external_builder_clcompile_cmd'):
clcompile_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_clcompile_cmd'],
False, False, False, False)
clcompile_target = ['Target', {'Name': 'ClCompile'}]
clcompile_target.append(['Exec', {'Command': clcompile_cmd}])
targets.append(clcompile_target)
return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = OrderedSet()
actions_spec = []
for primary_input, actions in actions_to_add.items():
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
# We can't join the commands simply with && because the command line will
# get too long. See also _AddActions: cygwin's setup_env mustn't be called
# for every invocation or the command that sets the PATH will grow too
# long.
command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%'
for c in commands])
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
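# Sketch (hypothetical commands) of the merged command string built above: each
# action is joined with an errorlevel check so that a failing step aborts the
# whole custom build instead of silently continuing.
#
#   commands == ['call python gen_a.py', 'call python gen_b.py']
#   command  == ('call python gen_a.py\r\n'
#                'if %errorlevel% neq 0 exit /b %errorlevel%\r\n'
#                'call python gen_b.py\r\n'
#                'if %errorlevel% neq 0 exit /b %errorlevel%')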
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| {
"content_hash": "c9edbea341d00771ecfc5d60f73f6e43",
"timestamp": "",
"source": "github",
"line_count": 3571,
"max_line_length": 134,
"avg_line_length": 37.95407448893867,
"alnum_prop": 0.6424439624005784,
"repo_name": "enclose-io/compiler",
"id": "933042c7113c59b1bd45f50a2b48e66073b7bc02",
"size": "135691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lts/tools/gyp/pylib/gyp/generator/msvs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "11474"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse_lazy
from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
from braces.views import LoginRequiredMixin
from ..forms import RegistrationForm
from ..models import get_application_model
class ApplicationOwnerIsUserMixin(LoginRequiredMixin):
"""
This mixin is used to provide an Application queryset filtered by the current request.user.
"""
model = get_application_model()
fields = '__all__'
def get_queryset(self):
queryset = super(ApplicationOwnerIsUserMixin, self).get_queryset()
return queryset.filter(user=self.request.user)
class ApplicationRegistration(LoginRequiredMixin, CreateView):
"""
View used to register a new Application for the request.user
"""
form_class = RegistrationForm
template_name = "oauth2_provider/application_registration_form.html"
def form_valid(self, form):
form.instance.user = self.request.user
return super(ApplicationRegistration, self).form_valid(form)
class ApplicationDetail(ApplicationOwnerIsUserMixin, DetailView):
"""
Detail view for an application instance owned by the request.user
"""
context_object_name = 'application'
template_name = "oauth2_provider/application_detail.html"
class ApplicationList(ApplicationOwnerIsUserMixin, ListView):
"""
List view for all the applications owned by the request.user
"""
context_object_name = 'applications'
template_name = "oauth2_provider/application_list.html"
class ApplicationDelete(ApplicationOwnerIsUserMixin, DeleteView):
"""
View used to delete an application owned by the request.user
"""
context_object_name = 'application'
success_url = reverse_lazy('oauth2_provider:list')
template_name = "oauth2_provider/application_confirm_delete.html"
class ApplicationUpdate(ApplicationOwnerIsUserMixin, UpdateView):
"""
View used to update an application owned by the request.user
"""
context_object_name = 'application'
template_name = "oauth2_provider/application_form.html"
| {
"content_hash": "459927daafe8c1cc1ab40552d8e3b5be",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 95,
"avg_line_length": 33.125,
"alnum_prop": 0.7382075471698113,
"repo_name": "svetlyak40wt/django-oauth-toolkit",
"id": "2d6ef617f495e650ab5e034559ffb034e79093b7",
"size": "2120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oauth2_provider/views/application.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "207679"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import datetime
from functools import partial
from textwrap import dedent
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._libs.tslibs import Timedelta
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
Axis,
FrameOrSeries,
TimedeltaConvertibleTypes,
)
if TYPE_CHECKING:
from pandas import DataFrame, Series
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.dtypes.missing import isna
import pandas.core.common as common # noqa: PDF018
from pandas.core.util.numba_ import maybe_use_numba
from pandas.core.window.common import zsqrt
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_header,
kwargs_compat,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
)
from pandas.core.window.indexers import (
BaseIndexer,
ExponentialMovingWindowIndexer,
GroupbyIndexer,
)
from pandas.core.window.numba_ import (
generate_ewma_numba_table_func,
generate_numba_ewma_func,
)
from pandas.core.window.online import (
EWMMeanState,
generate_online_numba_ewma_func,
)
from pandas.core.window.rolling import (
BaseWindow,
BaseWindowGroupby,
)
def get_center_of_mass(
comass: float | None,
span: float | None,
halflife: float | None,
alpha: float | None,
) -> float:
valid_count = common.count_not_none(comass, span, halflife, alpha)
if valid_count > 1:
raise ValueError("comass, span, halflife, and alpha are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if comass is not None:
if comass < 0:
raise ValueError("comass must satisfy: comass >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
comass = (span - 1) / 2
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
comass = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
comass = (1 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
return float(comass)
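# Worked conversions (illustrative only) for the decay parameters above:
#   span=9     -> comass = (9 - 1) / 2 = 4.0    (equivalently alpha = 2 / (9 + 1) = 0.2)
#   halflife=1 -> comass = 1 / (1 - exp(ln(0.5) / 1)) - 1 = 1.0
#   alpha=0.5  -> comass = (1 - 0.5) / 0.5 = 1.0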
def _calculate_deltas(
times: str | np.ndarray | FrameOrSeries | None,
halflife: float | TimedeltaConvertibleTypes | None,
) -> np.ndarray:
"""
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.
Parameters
----------
times : str, np.ndarray, Series, default None
Times corresponding to the observations. Must be monotonically increasing
and ``datetime64[ns]`` dtype.
halflife : float, str, timedelta, optional
Half-life specifying the decay
Returns
-------
np.ndarray
Diff of the times divided by the half-life
"""
# error: Item "str" of "Union[str, ndarray, FrameOrSeries, None]" has no
# attribute "view"
# error: Item "None" of "Union[str, ndarray, FrameOrSeries, None]" has no
# attribute "view"
_times = np.asarray(
times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]
)
_halflife = float(Timedelta(halflife).value)
return np.diff(_times) / _halflife
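# Small illustration (hypothetical times): with halflife='2 days' and daily
# observations, each consecutive diff is one day, i.e. 0.5 half-lives.
#
#   times = pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03'])
#   _calculate_deltas(times, '2 days')   # -> array([0.5, 0.5])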
class ExponentialMovingWindow(BaseWindow):
r"""
Provide exponential weighted (EW) functions.
Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``.
Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be
provided.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`.
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`.
halflife : float, str, timedelta, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for
:math:`halflife > 0`.
If ``times`` is specified, the time unit (str or timedelta) over which an
        observation decays to half its value. Only applicable to ``mean()``;
        the halflife value will not apply to the other functions.
.. versionadded:: 1.1.0
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`.
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
adjust : bool, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average).
- When ``adjust=True`` (default), the EW function is calculated using weights
:math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series
[:math:`x_0, x_1, ..., x_t`] would be:
.. math::
y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 -
\alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t}
- When ``adjust=False``, the exponentially weighted function is calculated
recursively:
.. math::
\begin{split}
y_0 &= x_0\\
y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,
\end{split}
ignore_na : bool, default False
Ignore missing values when calculating weights; specify ``True`` to reproduce
pre-0.15.0 behavior.
- When ``ignore_na=False`` (default), weights are based on absolute positions.
For example, the weights of :math:`x_0` and :math:`x_2` used in calculating
the final weighted average of [:math:`x_0`, None, :math:`x_2`] are
:math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and
:math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.
- When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`
used in calculating the final weighted average of
[:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if
``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``.
axis : {0, 1}, default 0
The axis to use. The value 0 identifies the rows, and 1
identifies the columns.
times : str, np.ndarray, Series, default None
.. versionadded:: 1.1.0
Times corresponding to the observations. Must be monotonically increasing and
``datetime64[ns]`` dtype.
If str, the name of the column in the DataFrame representing the times.
If 1-D array like, a sequence with the same shape as the observations.
Only applicable to ``mean()``.
method : str {'single', 'table'}, default 'single'
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
Only applicable to ``mean()``
.. versionadded:: 1.4.0
Returns
-------
DataFrame
A Window sub-classed for the particular operation.
See Also
--------
rolling : Provides rolling window calculations.
expanding : Provides expanding transformations.
Notes
-----
More details can be found at:
:ref:`Exponentially weighted windows <window.exponentially_weighted>`.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Specifying ``times`` with a timedelta ``halflife`` when computing mean.
>>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
>>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
B
0 0.000000
1 0.585786
2 1.523889
3 1.523889
4 3.233686
"""
_attributes = [
"com",
"span",
"halflife",
"alpha",
"min_periods",
"adjust",
"ignore_na",
"axis",
"times",
"method",
]
def __init__(
self,
obj: FrameOrSeries,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | FrameOrSeries | None = None,
method: str = "single",
*,
selection=None,
):
super().__init__(
obj=obj,
min_periods=1 if min_periods is None else max(int(min_periods), 1),
on=None,
center=False,
closed=None,
method=method,
axis=axis,
selection=selection,
)
self.com = com
self.span = span
self.halflife = halflife
self.alpha = alpha
self.adjust = adjust
self.ignore_na = ignore_na
self.times = times
if self.times is not None:
if not self.adjust:
raise NotImplementedError("times is not supported with adjust=False.")
if isinstance(self.times, str):
self.times = self._selected_obj[self.times]
if not is_datetime64_ns_dtype(self.times):
raise ValueError("times must be datetime64[ns] dtype.")
# error: Argument 1 to "len" has incompatible type "Union[str, ndarray,
# FrameOrSeries, None]"; expected "Sized"
if len(self.times) != len(obj): # type: ignore[arg-type]
raise ValueError("times must be the same length as the object.")
if not isinstance(self.halflife, (str, datetime.timedelta)):
raise ValueError(
"halflife must be a string or datetime.timedelta object"
)
if isna(self.times).any():
raise ValueError("Cannot convert NaT values to integer")
self._deltas = _calculate_deltas(self.times, self.halflife)
# Halflife is no longer applicable when calculating COM
# But allow COM to still be calculated if the user passes other decay args
if common.count_not_none(self.com, self.span, self.alpha) > 0:
self._com = get_center_of_mass(self.com, self.span, None, self.alpha)
else:
self._com = 1.0
else:
if self.halflife is not None and isinstance(
self.halflife, (str, datetime.timedelta)
):
raise ValueError(
"halflife can only be a timedelta convertible argument if "
"times is not None."
)
# Without times, points are equally spaced
self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64)
self._com = get_center_of_mass(
# error: Argument 3 to "get_center_of_mass" has incompatible type
# "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]";
# expected "Optional[float]"
self.com,
self.span,
self.halflife, # type: ignore[arg-type]
self.alpha,
)
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
return ExponentialMovingWindowIndexer()
def online(self, engine="numba", engine_kwargs=None):
"""
Return an ``OnlineExponentialMovingWindow`` object to calculate
exponentially moving window aggregations in an online method.
.. versionadded:: 1.3.0
Parameters
----------
engine: str, default ``'numba'``
Execution engine to calculate online aggregations.
Applies to all supported aggregation methods.
engine_kwargs : dict, default None
Applies to all supported aggregation methods.
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
Returns
-------
OnlineExponentialMovingWindow
"""
return OnlineExponentialMovingWindow(
obj=self.obj,
com=self.com,
span=self.span,
halflife=self.halflife,
alpha=self.alpha,
min_periods=self.min_periods,
adjust=self.adjust,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
engine=engine,
engine_kwargs=engine_kwargs,
selection=self._selection,
)
@doc(
_shared_docs["aggregate"],
see_also=dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
"""
),
examples=dedent(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
>>> df.ewm(alpha=0.5).mean()
A B C
0 1.000000 4.000000 7.000000
1 1.666667 4.666667 7.666667
2 2.428571 5.428571 8.428571
"""
),
klass="Series/Dataframe",
axis="",
)
def aggregate(self, func, *args, **kwargs):
return super().aggregate(func, *args, **kwargs)
agg = aggregate
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes.replace("\n", "", 1),
window_method="ewm",
aggregation_description="(exponential weighted moment) mean",
agg_method="mean",
)
def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
if self.method == "single":
ewma_func = generate_numba_ewma_func(
engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
)
numba_cache_key = (lambda x: x, "ewma")
else:
ewma_func = generate_ewma_numba_table_func(
engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
)
numba_cache_key = (lambda x: x, "ewma_table")
return self._apply(
ewma_func,
numba_cache_key=numba_cache_key,
)
elif engine in ("cython", None):
if engine_kwargs is not None:
raise ValueError("cython engine does not accept engine_kwargs")
nv.validate_window_func("mean", args, kwargs)
window_func = partial(
window_aggregations.ewma,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=self._deltas,
)
return self._apply(window_func)
else:
raise ValueError("engine must be either 'numba' or 'cython'")
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
args_compat,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) standard deviation",
agg_method="std",
)
def std(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
return zsqrt(self.var(bias=bias, **kwargs))
def vol(self, bias: bool = False, *args, **kwargs):
warnings.warn(
(
"vol is deprecated will be removed in a future version. "
"Use std instead."
),
FutureWarning,
stacklevel=2,
)
return self.std(bias, *args, **kwargs)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
args_compat,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) variance",
agg_method="var",
)
def var(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
window_func = window_aggregations.ewmcov
wfunc = partial(
window_func,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
bias=bias,
)
def var_func(values, begin, end, min_periods):
return wfunc(values, begin, end, min_periods, values)
return self._apply(var_func)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
        other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) sample covariance",
agg_method="cov",
)
def cov(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
bias: bool = False,
**kwargs,
):
from pandas import Series
def cov_func(x, y):
x_array = self._prep_values(x)
y_array = self._prep_values(y)
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
start, end = window_indexer.get_window_bounds(
num_values=len(x_array),
min_periods=min_periods,
center=self.center,
closed=self.closed,
)
result = window_aggregations.ewmcov(
x_array,
start,
end,
# error: Argument 4 to "ewmcov" has incompatible type
# "Optional[int]"; expected "int"
self.min_periods, # type: ignore[arg-type]
y_array,
self._com,
self.adjust,
self.ignore_na,
bias,
)
return Series(result, index=x.index, name=x.name)
return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
"""
).replace("\n", "", 1),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) sample correlation",
agg_method="corr",
)
def corr(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
**kwargs,
):
from pandas import Series
def cov_func(x, y):
x_array = self._prep_values(x)
y_array = self._prep_values(y)
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
start, end = window_indexer.get_window_bounds(
num_values=len(x_array),
min_periods=min_periods,
center=self.center,
closed=self.closed,
)
def _cov(X, Y):
return window_aggregations.ewmcov(
X,
start,
end,
min_periods,
Y,
self._com,
self.adjust,
self.ignore_na,
True,
)
with np.errstate(all="ignore"):
cov = _cov(x_array, y_array)
x_var = _cov(x_array, x_array)
y_var = _cov(y_array, y_array)
result = cov / zsqrt(x_var * y_var)
return Series(result, index=x.index, name=x.name)
return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):
"""
Provide an exponential moving window groupby implementation.
"""
_attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes
def __init__(self, obj, *args, _grouper=None, **kwargs):
super().__init__(obj, *args, _grouper=_grouper, **kwargs)
if not obj.empty and self.times is not None:
# sort the times and recalculate the deltas according to the groups
groupby_order = np.concatenate(list(self._grouper.indices.values()))
self._deltas = _calculate_deltas(
self.times.take(groupby_order), # type: ignore[union-attr]
self.halflife,
)
def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
Returns
-------
GroupbyIndexer
"""
window_indexer = GroupbyIndexer(
groupby_indicies=self._grouper.indices,
window_indexer=ExponentialMovingWindowIndexer,
)
return window_indexer
class OnlineExponentialMovingWindow(ExponentialMovingWindow):
def __init__(
self,
obj: FrameOrSeries,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | FrameOrSeries | None = None,
engine: str = "numba",
engine_kwargs: dict[str, bool] | None = None,
*,
selection=None,
):
if times is not None:
raise NotImplementedError(
"times is not implemented with online operations."
)
super().__init__(
obj=obj,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
selection=selection,
)
self._mean = EWMMeanState(
self._com, self.adjust, self.ignore_na, self.axis, obj.shape
)
if maybe_use_numba(engine):
self.engine = engine
self.engine_kwargs = engine_kwargs
else:
raise ValueError("'numba' is the only supported engine")
def reset(self):
"""
Reset the state captured by `update` calls.
"""
self._mean.reset()
def aggregate(self, func, *args, **kwargs):
return NotImplementedError
def std(self, bias: bool = False, *args, **kwargs):
return NotImplementedError
def corr(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
**kwargs,
):
return NotImplementedError
def cov(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
bias: bool = False,
**kwargs,
):
return NotImplementedError
def var(self, bias: bool = False, *args, **kwargs):
return NotImplementedError
def mean(self, *args, update=None, update_times=None, **kwargs):
"""
Calculate an online exponentially weighted mean.
Parameters
----------
update: DataFrame or Series, default None
New values to continue calculating the
exponentially weighted mean from the last values and weights.
Values should be float64 dtype.
``update`` needs to be ``None`` the first time the
exponentially weighted mean is calculated.
update_times: Series or 1-D np.ndarray, default None
New times to continue calculating the
exponentially weighted mean from the last values and weights.
If ``None``, values are assumed to be evenly spaced
in time.
This feature is currently unsupported.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
>>> online_ewm = df.head(2).ewm(0.5).online()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
>>> online_ewm.mean(update=df.tail(3))
a b
2 1.615385 6.615385
3 2.550000 7.550000
4 3.520661 8.520661
>>> online_ewm.reset()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
"""
result_kwargs = {}
        is_frame = self._selected_obj.ndim == 2
if update_times is not None:
raise NotImplementedError("update_times is not implemented.")
else:
update_deltas = np.ones(
max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64
)
if update is not None:
if self._mean.last_ewm is None:
raise ValueError(
"Must call mean with update=None first before passing update"
)
result_from = 1
result_kwargs["index"] = update.index
if is_frame:
last_value = self._mean.last_ewm[np.newaxis, :]
result_kwargs["columns"] = update.columns
else:
last_value = self._mean.last_ewm
result_kwargs["name"] = update.name
np_array = np.concatenate((last_value, update.to_numpy()))
else:
result_from = 0
result_kwargs["index"] = self._selected_obj.index
if is_frame:
result_kwargs["columns"] = self._selected_obj.columns
else:
result_kwargs["name"] = self._selected_obj.name
np_array = self._selected_obj.astype(np.float64).to_numpy()
ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
result = self._mean.run_ewm(
np_array if is_frame else np_array[:, np.newaxis],
update_deltas,
self.min_periods,
ewma_func,
)
if not is_frame:
result = result.squeeze()
result = result[result_from:]
result = self._selected_obj._constructor(result, **result_kwargs)
return result
| {
"content_hash": "88a1348c523d44e2e0f4a89defb01c71",
"timestamp": "",
"source": "github",
"line_count": 893,
"max_line_length": 88,
"avg_line_length": 33.836506159014554,
"alnum_prop": 0.5512973259200423,
"repo_name": "gfyoung/pandas",
"id": "ee99692b85432c4cc1d87a83d9c77366b74546d2",
"size": "30216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/window/ewm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import math
__all__ = ["ResNet", "ResNet50", "ResNet101", "ResNet152"]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ResNet():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, class_dim=1000):
layers = self.layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_filters = [64, 128, 256, 512]
conv = self.conv_bn_layer(
input=input, num_filters=64, filter_size=7, stride=2, act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv,
stdv)))
return out
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def shortcut(self, input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
return self.conv_bn_layer(input, ch_out, 1, stride)
else:
return input
def bottleneck_block(self, input, num_filters, stride):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu')
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 4, filter_size=1, act=None)
short = self.shortcut(input, num_filters * 4, stride)
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def ResNet50():
model = ResNet(layers=50)
return model
def ResNet101():
model = ResNet(layers=101)
return model
def ResNet152():
model = ResNet(layers=152)
return model
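# Minimal usage sketch (illustrative; assumes the surrounding fluid program and
# executor are set up elsewhere, as in this repo's training scripts):
#
#   image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
#   model = ResNet50()
#   logits = model.net(image, class_dim=1000)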
| {
"content_hash": "2fb1b0317565ce3d684688ccacc3b6d0",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 92,
"avg_line_length": 31.016393442622952,
"alnum_prop": 0.5118921775898521,
"repo_name": "kuke/models",
"id": "def99db6d84673b77582cf93374f4cb2f00e9ac5",
"size": "3784",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleCV/image_classification/models/resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
app = Celery('myproject')
CELERY_TIMEZONE = 'UTC'
app.config_from_object('django.conf:settings')
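# Illustrative only (added for clarity; not part of the original config): with
# the autodiscovery call below, a hypothetical myapp/tasks.py could declare
#
#     @app.task
#     def add(x, y):
#         return x + y
#
# and the task would be registered against this `app` instance.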
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) | {
"content_hash": "090bb36f365fd64fc5dd691f688e78f6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 69,
"avg_line_length": 22.4,
"alnum_prop": 0.7827380952380952,
"repo_name": "ilonajulczuk/docker-django-celery",
"id": "3d25be1a4c2f30da9f8c22b06e853543848f2419",
"size": "350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "myproject/myproject/celeryconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6826"
},
{
"name": "Shell",
"bytes": "228"
}
],
"symlink_target": ""
} |
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working]
@pytest.fixture
def ontology_term_1(so_ont, award, lab):
    return {
"schema_version": '1',
"term_id": 'SO:0001111',
"term_name": 'so_term',
"source_ontology": so_ont['@id']
}
def test_ontology_term_1_2(
app, ontology_term_1, so_ont):
migrator = app.registry['upgrader']
value = migrator.upgrade('ontology_term', ontology_term_1, current_version='1', target_version='2')
assert value['schema_version'] == '2'
assert value['source_ontologies'][0] == so_ont['@id']
| {
"content_hash": "7f39809169775d2d6eaa0276d9a1d368",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 103,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.6187290969899666,
"repo_name": "hms-dbmi/fourfront",
"id": "b85f4419b531490a1f189c9143fd6d571b665fca",
"size": "598",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/encoded/tests/test_upgrade_ontology_term.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "198339"
},
{
"name": "Cucumber",
"bytes": "16918"
},
{
"name": "HTML",
"bytes": "371973"
},
{
"name": "JavaScript",
"bytes": "1403972"
},
{
"name": "Makefile",
"bytes": "110"
},
{
"name": "PLpgSQL",
"bytes": "12067"
},
{
"name": "Python",
"bytes": "751772"
},
{
"name": "Ruby",
"bytes": "1066"
},
{
"name": "Shell",
"bytes": "2248"
}
],
"symlink_target": ""
} |
"""pycodestyle support."""
from pycodestyle import BaseReport, Checker, StyleGuide, get_parser
from pylama.context import RunContext
from pylama.lint import LinterV2 as Abstract
class Linter(Abstract):
"""pycodestyle runner."""
name = "pycodestyle"
def run_check(self, ctx: RunContext): # noqa
"""Check code with pycodestyle."""
params = ctx.get_params("pycodestyle")
options = ctx.options
if options:
params.setdefault("max_line_length", options.max_line_length)
if params:
parser = get_parser()
for option in parser.option_list:
if option.dest and option.dest in params:
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
style = StyleGuide(reporter=_PycodestyleReport, **params)
options = style.options
options.report.ctx = ctx # type: ignore
checker = Checker(ctx.filename, lines=ctx.lines, options=options)
checker.check_all()
class _PycodestyleReport(BaseReport):
ctx: RunContext
def error(self, line_number, offset, text, _):
"""Save errors."""
code, _, text = text.partition(" ")
self.ctx.push(
text=text,
type=code[0],
number=code,
col=offset + 1,
lnum=line_number,
source="pycodestyle",
)
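# Illustrative usage (added for clarity; not part of the original module):
# this linter is not normally called directly. pylama selects it by name,
# e.g. on the command line:
#
#     pylama --linters pycodestyle some_package/
#
# Per-linter options such as max_line_length reach run_check() above through
# ctx.get_params("pycodestyle"), typically coming from the project's pylama
# configuration (exact section names depend on the pylama version in use).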
| {
"content_hash": "1d761f361aa104e8ebb1b0bfffd071c4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 30.489795918367346,
"alnum_prop": 0.5850066934404283,
"repo_name": "klen/pylama",
"id": "1b5f99ff1ef9a70fe42a97cbce5844dc110efe6b",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pylama/lint/pylama_pycodestyle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "209"
},
{
"name": "Makefile",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "80335"
}
],
"symlink_target": ""
} |
import collections
import functools
import re
import warnings
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, NotSupportedError
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.inspect import func_supports_parameter
FORCE = object()
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
        # The select, klass_info, and annotations are needed by QuerySet.iterator();
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
        to describe in words other than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or getattr(expr, 'alias', None) not in pk_aliases
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col, select_format=True)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = True if order == 'DESC' else False
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
if self.query.combinator:
src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
expr.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format is FORCE or (select_format and not self.query.subquery):
return node.output_field.select_format(self, sql, params)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
parts += (compiler.as_sql(),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limits and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for select, _, alias in self.select:
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) AS subquery' % (
', '.join(sub_selects),
' '.join(result),
), sub_params + params
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions (one per concrete field that will
        be loaded), suitable for inclusion in the SELECT clause.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = True if order == 'DESC' else False
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target columns
        on the same input, as the prefixes of get_order_by() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in self.query.alias_map:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
path = parent_path + [klass_info['field'].name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
parts = [] if name == 'self' else name.split(LOOKUP_SEP)
klass_info = self.klass_info
for part in parts:
for related_klass_info in klass_info.get('related_klass_infos', []):
if related_klass_info['field'].name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
select_index = klass_info['select_fields'][0]
col = self.select[select_index][0]
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
convs = []
for conv in (backend_converters + field_converters):
if func_supports_parameter(conv, 'context'):
warnings.warn(
'Remove the context parameter from %s.%s(). Support for it '
'will be removed in Django 3.0.' % (
conv.__self__.__class__.__name__,
conv.__name__,
),
RemovedInDjango30Warning,
)
conv = functools.partial(conv, context={})
convs.append(conv)
converters[i] = (convs, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in rows:
row = list(row)
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield tuple(row)
def results_iter(self, results=None):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch and not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super().__init__(*args, **kwargs)
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
if self.return_id and self.connection.features.can_return_id_from_insert:
if self.connection.features.can_return_ids_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (
return_id and len(self.query.objs) != 1 and
not self.connection.features.can_return_ids_from_bulk_insert
)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_ids(cursor)
if self.connection.features.can_return_id_from_insert:
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.base_table)]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=FORCE)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
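# Illustrative note (added for clarity; not part of the original module):
# application code does not normally instantiate these compilers directly;
# they are reached through the ORM, e.g. (model name hypothetical):
#
#     qs = Book.objects.filter(published=True).order_by('-id')
#     compiler = qs.query.get_compiler(using='default')
#     sql, params = compiler.as_sql()
#
# which exercises pre_sql_setup(), get_select(), get_order_by(), as_sql(),
# and friends defined above.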
| {
"content_hash": "19683e5f6b46079c55b2e0975fe84762",
"timestamp": "",
"source": "github",
"line_count": 1400,
"max_line_length": 118,
"avg_line_length": 44.95928571428571,
"alnum_prop": 0.5572660978980983,
"repo_name": "mlavin/django",
"id": "f92df8c996c207cddf64b75020b667e35796aa0a",
"size": "62943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/sql/compiler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52372"
},
{
"name": "HTML",
"bytes": "170527"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11482414"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""
Python client for InfluxDB
"""
from functools import wraps
import json
import socket
import time
import threading
import random
import requests
import requests.exceptions
from sys import version_info
from influxdb.line_protocol import make_lines
from influxdb.resultset import ResultSet
from .exceptions import InfluxDBClientError
from .exceptions import InfluxDBServerError
try:
xrange
except NameError:
xrange = range
if version_info[0] == 3:
from urllib.parse import urlparse
else:
from urlparse import urlparse
class InfluxDBClient(object):
"""The :class:`~.InfluxDBClient` object holds information necessary to
connect to InfluxDB. Requests can be made to InfluxDB directly through
the client.
:param host: hostname to connect to InfluxDB, defaults to 'localhost'
:type host: str
:param port: port to connect to InfluxDB, defaults to 8086
:type port: int
:param username: user to connect, defaults to 'root'
:type username: str
:param password: password of the user, defaults to 'root'
:type password: str
:param database: database name to connect to, defaults to None
:type database: str
:param ssl: use https instead of http to connect to InfluxDB, defaults to
False
:type ssl: bool
:param verify_ssl: verify SSL certificates for HTTPS requests, defaults to
False
:type verify_ssl: bool
:param timeout: number of seconds Requests will wait for your client to
establish a connection, defaults to None
:type timeout: int
:param use_udp: use UDP to connect to InfluxDB, defaults to False
    :type use_udp: bool
:param udp_port: UDP port to connect to InfluxDB, defaults to 4444
:type udp_port: int
:param proxies: HTTP(S) proxy to use for Requests, defaults to {}
:type proxies: dict
"""
def __init__(self,
host='localhost',
port=8086,
username='root',
password='root',
database=None,
ssl=False,
verify_ssl=False,
timeout=None,
use_udp=False,
udp_port=4444,
proxies=None,
):
"""Construct a new InfluxDBClient object."""
self.__host = host
self.__port = port
self._username = username
self._password = password
self._database = database
self._timeout = timeout
self._verify_ssl = verify_ssl
self.use_udp = use_udp
self.udp_port = udp_port
self._session = requests.Session()
if use_udp:
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._scheme = "http"
if ssl is True:
self._scheme = "https"
if proxies is None:
self._proxies = {}
else:
self._proxies = proxies
self.__baseurl = "{0}://{1}:{2}".format(
self._scheme,
self._host,
self._port)
self._headers = {
'Content-type': 'application/json',
'Accept': 'text/plain'
}
# _baseurl, _host and _port are properties to allow InfluxDBClusterClient
# to override them with thread-local variables
@property
def _baseurl(self):
return self._get_baseurl()
def _get_baseurl(self):
return self.__baseurl
@property
def _host(self):
return self._get_host()
def _get_host(self):
return self.__host
@property
def _port(self):
return self._get_port()
def _get_port(self):
return self.__port
@staticmethod
def from_DSN(dsn, **kwargs):
"""Return an instance of :class:`~.InfluxDBClient` from the provided
data source name. Supported schemes are "influxdb", "https+influxdb"
and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient`
constructor may also be passed to this method.
:param dsn: data source name
:type dsn: string
:param kwargs: additional parameters for `InfluxDBClient`
:type kwargs: dict
:raises ValueError: if the provided DSN has any unexpected values
:Example:
::
>> cli = InfluxDBClient.from_DSN('influxdb://username:password@\
localhost:8086/databasename', timeout=5)
>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>> cli = InfluxDBClient.from_DSN('udp+influxdb://username:pass@\
localhost:8086/databasename', timeout=5, udp_port=159)
>> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
http://localhost:8086 - True 159
.. note:: parameters provided in `**kwargs` may override dsn parameters
.. note:: when using "udp+influxdb" the specified port (if any) will
be used for the TCP connection; specify the UDP port with the
additional `udp_port` parameter (cf. examples).
"""
init_args = parse_dsn(dsn)
host, port = init_args.pop('hosts')[0]
init_args['host'] = host
init_args['port'] = port
init_args.update(kwargs)
return InfluxDBClient(**init_args)
def switch_database(self, database):
"""Change the client's database.
:param database: the name of the database to switch to
:type database: str
"""
self._database = database
def switch_user(self, username, password):
"""Change the client's username.
:param username: the username to switch to
:type username: str
:param password: the password for the username
:type password: str
"""
self._username = username
self._password = password
def request(self, url, method='GET', params=None, data=None,
expected_response_code=200, headers=None):
"""Make a HTTP request to the InfluxDB API.
:param url: the path of the HTTP request, e.g. write, query, etc.
:type url: str
:param method: the HTTP method for the request, defaults to GET
:type method: str
:param params: additional parameters for the request, defaults to None
:type params: dict
:param data: the data of the request, defaults to None
:type data: str
:param expected_response_code: the expected response code of
the request, defaults to 200
        :type expected_response_code: int
        :param headers: headers to send with the request, defaults to the
            client's default headers
        :type headers: dict
:returns: the response from the request
:rtype: :class:`requests.Response`
:raises InfluxDBServerError: if the response code is any server error
code (5xx)
:raises InfluxDBClientError: if the response code is not the
same as `expected_response_code` and is not a server error code
"""
url = "{0}/{1}".format(self._baseurl, url)
if headers is None:
headers = self._headers
if params is None:
params = {}
if isinstance(data, (dict, list)):
data = json.dumps(data)
# Try to send the request a maximum of three times. (see #103)
# TODO (aviau): Make this configurable.
for i in range(0, 3):
try:
response = self._session.request(
method=method,
url=url,
auth=(self._username, self._password),
params=params,
data=data,
headers=headers,
proxies=self._proxies,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except requests.exceptions.ConnectionError as e:
if i < 2:
continue
else:
raise e
if response.status_code >= 500 and response.status_code < 600:
raise InfluxDBServerError(response.content)
elif response.status_code == expected_response_code:
return response
else:
raise InfluxDBClientError(response.content, response.status_code)
def write(self, data, params=None, expected_response_code=204):
"""Write data to InfluxDB.
:param data: the data to be written
:type data: dict
:param params: additional parameters for the request, defaults to None
:type params: dict
:param expected_response_code: the expected response code of the write
operation, defaults to 204
:type expected_response_code: int
:returns: True, if the write operation is successful
:rtype: bool
"""
headers = self._headers
headers['Content-type'] = 'application/octet-stream'
if params:
precision = params.get('precision')
else:
precision = None
self.request(
url="write",
method='POST',
params=params,
data=make_lines(data, precision).encode('utf-8'),
expected_response_code=expected_response_code,
headers=headers
)
return True
def query(self,
query,
params=None,
epoch=None,
expected_response_code=200,
database=None,
raise_errors=True):
"""Send a query to InfluxDB.
:param query: the actual query string
:type query: str
:param params: additional parameters for the request, defaults to {}
        :type params: dict
        :param epoch: precision of the returned epoch timestamps, one of
            'h', 'm', 's', 'ms', 'u' or 'ns'; defaults to None, which
            returns RFC3339-formatted timestamps
        :type epoch: str
:param expected_response_code: the expected status code of response,
defaults to 200
:type expected_response_code: int
:param database: database to query, defaults to None
:type database: str
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:type raise_errors: bool
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
if params is None:
params = {}
params['q'] = query
params['db'] = database or self._database
if epoch is not None:
params['epoch'] = epoch
response = self.request(
url="query",
method='GET',
params=params,
data=None,
expected_response_code=expected_response_code
)
data = response.json()
results = [
ResultSet(result, raise_errors=raise_errors)
for result
in data.get('results', [])
]
# TODO(aviau): Always return a list. (This would be a breaking change)
if len(results) == 1:
return results[0]
else:
return results
def write_points(self,
points,
time_precision=None,
database=None,
retention_policy=None,
tags=None,
batch_size=None,
):
"""Write to multiple time series names.
:param points: the list of points to be written in the database
:type points: list of dictionaries, each dictionary represents a point
        :param time_precision: Either 'n', 'u', 'ms', 's', 'm' or 'h',
            defaults to None
:type time_precision: str
:param database: the database to write the points to. Defaults to
the client's current database
:type database: str
:param tags: a set of key-value pairs associated with each point. Both
keys and values must be strings. These are shared tags and will be
merged with point-specific tags, defaults to None
:type tags: dict
:param retention_policy: the retention policy for the points. Defaults
to None
:type retention_policy: str
:param batch_size: value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation,
defaults to None
:type batch_size: int
:returns: True, if the operation is successful
:rtype: bool
.. note:: if no retention policy is specified, the default retention
policy for the database is used
"""
if batch_size and batch_size > 0:
for batch in self._batches(points, batch_size):
self._write_points(points=batch,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags)
return True
else:
return self._write_points(points=points,
time_precision=time_precision,
database=database,
retention_policy=retention_policy,
tags=tags)
def _batches(self, iterable, size):
for i in xrange(0, len(iterable), size):
yield iterable[i:i + size]
def _write_points(self,
points,
time_precision,
database,
retention_policy,
tags):
if time_precision not in ['n', 'u', 'ms', 's', 'm', 'h', None]:
raise ValueError(
"Invalid time precision is given. "
"(use 'n', 'u', 'ms', 's', 'm' or 'h')")
if self.use_udp and time_precision and time_precision != 's':
raise ValueError(
"InfluxDB only supports seconds precision for udp writes"
)
data = {
'points': points
}
if tags is not None:
data['tags'] = tags
params = {
'db': database or self._database
}
if time_precision is not None:
params['precision'] = time_precision
if retention_policy is not None:
params['rp'] = retention_policy
if self.use_udp:
self.send_packet(data)
else:
self.write(
data=data,
params=params,
expected_response_code=204
)
return True
def get_list_database(self):
"""Get the list of databases in InfluxDB.
:returns: all databases in InfluxDB
:rtype: list of dictionaries
:Example:
::
>> dbs = client.get_list_database()
>> dbs
[{u'name': u'db1'}, {u'name': u'db2'}, {u'name': u'db3'}]
"""
return list(self.query("SHOW DATABASES").get_points())
def create_database(self, dbname, if_not_exists=False):
"""Create a new database in InfluxDB.
:param dbname: the name of the database to create
        :type dbname: str
        :param if_not_exists: only create the database if it does not
            exist already, defaults to False
        :type if_not_exists: bool
"""
if if_not_exists:
self.query("CREATE DATABASE IF NOT EXISTS \"%s\"" % dbname)
else:
self.query("CREATE DATABASE \"%s\"" % dbname)
def drop_database(self, dbname):
"""Drop a database from InfluxDB.
:param dbname: the name of the database to drop
:type dbname: str
"""
self.query("DROP DATABASE \"%s\"" % dbname)
def create_retention_policy(self, name, duration, replication,
database=None, default=False):
"""Create a retention policy for a database.
:param name: the name of the new retention policy
:type name: str
:param duration: the duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
            and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. For infinite retention – meaning the data will
never be deleted – use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the replication of the retention policy
:type replication: str
:param database: the database for which the retention policy is
created. Defaults to current client's database
:type database: str
:param default: whether or not to set the policy as default
:type default: bool
"""
query_string = \
"CREATE RETENTION POLICY %s ON %s " \
"DURATION %s REPLICATION %s" % \
(name, database or self._database, duration, replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string)
def alter_retention_policy(self, name, database=None,
duration=None, replication=None, default=None):
"""Mofidy an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
            and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. For infinite retention – meaning the data will
never be deleted – use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: str
:param default: whether or not to set the modified policy as default
:type default: bool
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
"""
query_string = (
"ALTER RETENTION POLICY {0} ON {1}"
).format(name, database or self._database)
if duration:
query_string += " DURATION {0}".format(duration)
if replication:
query_string += " REPLICATION {0}".format(replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string)
def get_list_retention_policies(self, database=None):
"""Get the list of retention policies for a database.
:param database: the name of the database, defaults to the client's
current database
:type database: str
:returns: all retention policies for the database
:rtype: list of dictionaries
:Example:
::
>> ret_policies = client.get_list_retention_policies('my_db')
>> ret_policies
[{u'default': True,
u'duration': u'0',
u'name': u'default',
u'replicaN': 1}]
"""
rsp = self.query(
"SHOW RETENTION POLICIES ON %s" % (database or self._database)
)
return list(rsp.get_points())
def get_list_series(self, database=None):
"""Get the list of series for a database.
:param database: the name of the database, defaults to the client's
current database
:type database: str
:returns: all series in the specified database
:rtype: list of dictionaries
:Example:
>> series = client.get_list_series('my_database')
>> series
[{'name': u'cpu_usage',
'tags': [{u'_id': 1,
u'host': u'server01',
u'region': u'us-west'}]}]
"""
rsp = self.query("SHOW SERIES", database=database)
series = []
for serie in rsp.items():
series.append(
{
"name": serie[0][0],
"tags": list(serie[1])
}
)
return series
def get_list_servers(self):
"""Get the list of servers in InfluxDB cluster.
:returns: all nodes in InfluxDB cluster
:rtype: list of dictionaries
:Example:
::
>> servers = client.get_list_servers()
>> servers
[{'cluster_addr': 'server01:8088',
'id': 1,
'raft': True,
'raft-leader': True}]
"""
return list(self.query("SHOW SERVERS").get_points())
def get_list_users(self):
"""Get the list of all users in InfluxDB.
:returns: all users in InfluxDB
:rtype: list of dictionaries
:Example:
::
>> users = client.get_list_users()
>> users
[{u'admin': True, u'user': u'user1'},
{u'admin': False, u'user': u'user2'},
{u'admin': False, u'user': u'user3'}]
"""
return list(self.query("SHOW USERS").get_points())
def create_user(self, username, password, admin=False):
"""Create a new user in InfluxDB
:param username: the new username to create
:type username: str
:param password: the password for the new user
:type password: str
        :param admin: whether the user should have cluster administration
            privileges or not, defaults to False
        :type admin: bool
"""
text = "CREATE USER \"{0}\" WITH PASSWORD '{1}'".format(username,
password)
if admin:
text += ' WITH ALL PRIVILEGES'
self.query(text)
def drop_user(self, username):
"""Drop an user from InfluxDB.
:param username: the username to drop
:type username: str
"""
text = "DROP USER {0}".format(username)
self.query(text)
def set_user_password(self, username, password):
"""Change the password of an existing user.
        :param username: the username whose password is being changed
:type username: str
:param password: the new password for the user
:type password: str
"""
text = "SET PASSWORD FOR {0} = '{1}'".format(username, password)
self.query(text)
def delete_series(self, database=None, measurement=None, tags=None):
"""Delete series from a database. Series can be filtered by
measurement and tags.
        :param measurement: Delete all series from a measurement
        :type measurement: str
        :param tags: Delete all series that match given tags
        :type tags: dict
:param database: the database from which the series should be
deleted, defaults to client's current database
:type database: str
"""
database = database or self._database
query_str = 'DROP SERIES'
if measurement:
query_str += ' FROM "{0}"'.format(measurement)
if tags:
query_str += ' WHERE ' + ' and '.join(["{0}='{1}'".format(k, v)
for k, v in tags.items()])
self.query(query_str, database=database)
def revoke_admin_privileges(self, username):
"""Revoke cluster administration privileges from an user.
:param username: the username to revoke privileges from
:type username: str
        .. note:: Only a cluster administrator can create/drop databases
and manage users.
"""
text = "REVOKE ALL PRIVILEGES FROM {0}".format(username)
self.query(text)
def grant_privilege(self, privilege, database, username):
"""Grant a privilege on a database to an user.
:param privilege: the privilege to grant, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to grant the privilege on
:type database: str
:param username: the username to grant the privilege to
:type username: str
"""
text = "GRANT {0} ON {1} TO {2}".format(privilege,
database,
username)
self.query(text)
def revoke_privilege(self, privilege, database, username):
"""Revoke a privilege on a database from an user.
:param privilege: the privilege to revoke, one of 'read', 'write'
or 'all'. The string is case-insensitive
:type privilege: str
:param database: the database to revoke the privilege on
:type database: str
:param username: the username to revoke the privilege from
:type username: str
"""
text = "REVOKE {0} ON {1} FROM {2}".format(privilege,
database,
username)
self.query(text)
def send_packet(self, packet):
"""Send an UDP packet.
:param packet: the packet to be sent
:type packet: dict
"""
data = make_lines(packet).encode('utf-8')
self.udp_socket.sendto(data, (self._host, self.udp_port))
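# Hedged usage sketch (not part of the original client): assumes a reachable
# InfluxDB instance on localhost:8086 and an existing database named
# 'example'; the measurement, tag and field names below are illustrative only.
def _example_write_and_query():
    client = InfluxDBClient(host='localhost', port=8086,
                            username='root', password='root',
                            database='example')
    points = [{
        "measurement": "cpu_load_short",
        "tags": {"host": "server01", "region": "us-west"},
        "fields": {"value": 0.64},
    }]
    # write_points() serializes the points to line protocol and POSTs them.
    client.write_points(points)
    # query() returns a ResultSet when the response holds a single result.
    result = client.query("SELECT value FROM cpu_load_short")
    return list(result.get_points())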
class InfluxDBClusterClient(object):
"""The :class:`~.InfluxDBClusterClient` is the client for connecting
    to a cluster of InfluxDB servers. Each query hits a different host from the
list of hosts.
:param hosts: all hosts to be included in the cluster, each of which
should be in the format (address, port),
e.g. [('127.0.0.1', 8086), ('127.0.0.1', 9096)]. Defaults to
[('localhost', 8086)]
:type hosts: list of tuples
    :param shuffle: whether the queries should hit servers evenly (randomly),
defaults to True
:type shuffle: bool
:param client_base_class: the base class for the cluster client.
This parameter is used to enable the support of different client
types. Defaults to :class:`~.InfluxDBClient`
:param healing_delay: the delay in seconds, counting from last failure of
        a server, before re-adding the server to the list of working servers.
Defaults to 15 minutes (900 seconds)
"""
def __init__(self,
hosts=[('localhost', 8086)],
username='root',
password='root',
database=None,
ssl=False,
verify_ssl=False,
timeout=None,
use_udp=False,
udp_port=4444,
shuffle=True,
client_base_class=InfluxDBClient,
healing_delay=900,
):
self.clients = [self] # Keep it backwards compatible
self.hosts = hosts
self.bad_hosts = [] # Corresponding server has failures in history
self.shuffle = shuffle
self.healing_delay = healing_delay
self._last_healing = time.time()
host, port = self.hosts[0]
self._hosts_lock = threading.Lock()
self._thread_local = threading.local()
self._client = client_base_class(host=host,
port=port,
username=username,
password=password,
database=database,
ssl=ssl,
verify_ssl=verify_ssl,
timeout=timeout,
use_udp=use_udp,
udp_port=udp_port)
for method in dir(client_base_class):
orig_attr = getattr(client_base_class, method, '')
if method.startswith('_') or not callable(orig_attr):
continue
setattr(self, method, self._make_func(orig_attr))
self._client._get_host = self._get_host
self._client._get_port = self._get_port
self._client._get_baseurl = self._get_baseurl
self._update_client_host(self.hosts[0])
@staticmethod
def from_DSN(dsn, client_base_class=InfluxDBClient,
shuffle=True, **kwargs):
"""Same as :meth:`~.InfluxDBClient.from_DSN`, but supports
multiple servers.
:param shuffle: whether the queries should hit servers
            evenly (randomly), defaults to True
:type shuffle: bool
:param client_base_class: the base class for all clients in the
cluster. This parameter is used to enable the support of
different client types. Defaults to :class:`~.InfluxDBClient`
:Example:
::
>> cluster = InfluxDBClusterClient.from_DSN('influxdb://usr:pwd\
@host1:8086,usr:pwd@host2:8086/db_name', timeout=5)
>> type(cluster)
<class 'influxdb.client.InfluxDBClusterClient'>
>> cluster.hosts
[('host1', 8086), ('host2', 8086)]
>> cluster._client
<influxdb.client.InfluxDBClient at 0x7feb438ec950>]
"""
init_args = parse_dsn(dsn)
init_args.update(**kwargs)
init_args['shuffle'] = shuffle
init_args['client_base_class'] = client_base_class
cluster_client = InfluxDBClusterClient(**init_args)
return cluster_client
def _update_client_host(self, host):
self._thread_local.host, self._thread_local.port = host
self._thread_local.baseurl = "{0}://{1}:{2}".format(
self._client._scheme,
self._client._host,
self._client._port
)
def _get_baseurl(self):
return self._thread_local.baseurl
def _get_host(self):
return self._thread_local.host
def _get_port(self):
return self._thread_local.port
def _make_func(self, orig_func):
@wraps(orig_func)
def func(*args, **kwargs):
now = time.time()
with self._hosts_lock:
if (self.bad_hosts and
self._last_healing + self.healing_delay < now):
h = self.bad_hosts.pop(0)
self.hosts.append(h)
self._last_healing = now
if self.shuffle:
random.shuffle(self.hosts)
hosts = self.hosts + self.bad_hosts
for h in hosts:
bad_host = False
try:
self._update_client_host(h)
return orig_func(self._client, *args, **kwargs)
except InfluxDBClientError as e:
# Errors caused by user's requests, re-raise
raise e
except Exception as e:
                    # Errors that might be caused by server failure; try another host
bad_host = True
with self._hosts_lock:
if h in self.hosts:
self.hosts.remove(h)
self.bad_hosts.append(h)
self._last_healing = now
finally:
with self._hosts_lock:
if not bad_host and h in self.bad_hosts:
self.bad_hosts.remove(h)
self.hosts.append(h)
raise InfluxDBServerError("InfluxDB: no viable server!")
return func
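# Hedged usage sketch (not part of the original module): the hosts and
# credentials are placeholders. Every proxied call is retried against the
# remaining hosts when the current one fails with a server-side error.
def _example_cluster_usage():
    cluster = InfluxDBClusterClient(hosts=[('host1', 8086), ('host2', 8086)],
                                    username='root', password='root',
                                    database='example')
    # get_list_database() is one of the InfluxDBClient methods proxied by
    # _make_func(), so it transparently fails over between hosts.
    return cluster.get_list_database()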
def parse_dsn(dsn):
conn_params = urlparse(dsn)
init_args = {}
scheme_info = conn_params.scheme.split('+')
if len(scheme_info) == 1:
scheme = scheme_info[0]
modifier = None
else:
modifier, scheme = scheme_info
if scheme != 'influxdb':
raise ValueError('Unknown scheme "{0}".'.format(scheme))
if modifier:
if modifier == 'udp':
init_args['use_udp'] = True
elif modifier == 'https':
init_args['ssl'] = True
else:
raise ValueError('Unknown modifier "{0}".'.format(modifier))
netlocs = conn_params.netloc.split(',')
init_args['hosts'] = []
for netloc in netlocs:
parsed = _parse_netloc(netloc)
init_args['hosts'].append((parsed['host'], int(parsed['port'])))
init_args['username'] = parsed['username']
init_args['password'] = parsed['password']
if conn_params.path and len(conn_params.path) > 1:
init_args['database'] = conn_params.path[1:]
return init_args
def _parse_netloc(netloc):
info = urlparse("http://{0}".format(netloc))
return {'username': info.username or None,
'password': info.password or None,
'host': info.hostname or 'localhost',
'port': info.port or 8086}
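# Hedged illustration (not part of the original module) of the argument dict
# parse_dsn() builds for a two-host UDP DSN; hosts and credentials are
# placeholders.
def _example_parse_dsn():
    init_args = parse_dsn(
        'udp+influxdb://usr:pwd@host1:8086,usr:pwd@host2:8086/db')
    # init_args == {'use_udp': True, 'username': 'usr', 'password': 'pwd',
    #               'hosts': [('host1', 8086), ('host2', 8086)],
    #               'database': 'db'}
    return init_args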
| {
"content_hash": "707d16126cbbbe69015cb522d1ff5e7d",
"timestamp": "",
"source": "github",
"line_count": 955,
"max_line_length": 79,
"avg_line_length": 34.48062827225131,
"alnum_prop": 0.5516717786753318,
"repo_name": "savoirfairelinux/influxdb-python",
"id": "8146d68d4eaced71b22d2db5941c4227bf971610",
"size": "32961",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "influxdb/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "245137"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
import md5
import sys
import os
from datetime import datetime
from datetime import timedelta
import platform
class CSV():
"""docstring for CSV"""
def __init__(self, file_name, mode='w+', separador=','):
try:
self.file=open(file_name,mode)
except Exception:
windows=platform.system()=='Windows'
if windows:
directory=os.environ['USERPROFILE']+'\\My Documents\\'
os.makedirs(directory+"relatorios")
else:
full_path = os.path.realpath(__file__)
directory=os.path.dirname(full_path)
os.makedirs(directory+"/relatorios")
finally:
self.file=open(file_name,mode)
self.separador=separador
def writerow(self,linha):
temp_str=u""
for elem in linha:
try:
if not (isinstance(elem, str) or isinstance(elem, unicode)):
temp_str=temp_str+str(elem)+self.separador
else:
temp_str=temp_str+elem+self.separador
except UnicodeDecodeError:
print elem
temp_str=temp_str[:-len(self.separador)]+'\n'
self.file.write(temp_str.encode('utf8'))
def finaliza(self):
self.file.close()
def criptografar_Senha(senha):
m = md5.new()
for i in range(32):
m.update(senha)
return m.hexdigest()
def dia_Semana_Int2str(num,completo=True):
dias=[u'Dom',u'Seg',u'Ter',u'Qua',u'Qui',u'Sex',u'Sab']
dias2=[u'Domingo',u'Segunda',u'Terça',u'Quarta',u'Quinta',u'Sexta',u'Sabado']
if completo==True:
return dias2[num]
return dias[num]
## Changes the name of the process
# @param newname New name for the process
def set_proc_name(newname):
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')
buff = create_string_buffer(len(newname)+1)
buff.value = newname
libc.prctl(15, byref(buff), 0, 0, 0)
def get_Week_Day(data=None):
wd=None
if data==None:
wd=datetime.now().weekday()+1
else:
wd=data.weekday()+1
return wd if wd!=7 else 0
def string_2_Timedelta(tempo):
t1 = datetime.strptime(tempo, '%H:%M:%S')
t2 = datetime.strptime("00:00:00", '%H:%M:%S')
return t1-t2
def string_2_Time(tempo):
return str(string_2_Timedelta(tempo))
def string_2_Datetime(data):
from datetime import datetime
return datetime.strptime(str(data), "%Y-%m-%d %H:%M:%S.%f")
def data_Atual(string=False):
if string==True:
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return datetime.now()
def falar(ponto,nome):
mensagem=u""
if ponto==2:
mensagem='Ola '.encode('utf-8')+nome+" , seu ponto de entrada foi registrado".encode('utf-8')
if ponto==3:
mensagem=u"Tiau ".encode('utf-8')+nome+" , seu ponto de saida foi registrado".encode('utf-8')
if ponto==1:
mensagem=u'Ola '+nome+" , voce nao tem ponto nesse horario mas vou abrir a porta para voce, s2"
comando=u'espeak -v brazil "'+mensagem+'"'
# os.system(comando)
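# Hedged usage sketch (not part of the original module): writes a small report
# with the CSV helper above; the file name and row contents are illustrative.
def exemplo_relatorio():
    relatorio = CSV('relatorio_exemplo.csv', mode='w+', separador=';')
    relatorio.writerow([u'nome', u'dia', u'registro'])
    relatorio.writerow([u'Maria', dia_Semana_Int2str(get_Week_Day()),
                        data_Atual(string=True)])
    relatorio.finaliza()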
| {
"content_hash": "bf0208d94f165ff5a6fcaa3b5ae36c85",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 103,
"avg_line_length": 32.103092783505154,
"alnum_prop": 0.5963391136801541,
"repo_name": "Mecajun/CdA",
"id": "884317f30eda16737d359390464753a07ce4a22e",
"size": "3139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "auxiliares.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "25231"
},
{
"name": "C++",
"bytes": "82465"
},
{
"name": "Python",
"bytes": "148890"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
import logging
import os
import yaml
import subprocess
import tempfile
RUNTIME_BUCKET = 'runtime-builders'
RUNTIME_BUCKET_PREFIX = 'gs://{0}/'.format(RUNTIME_BUCKET)
MANIFEST_FILE = RUNTIME_BUCKET_PREFIX + 'runtimes.yaml'
SCHEMA_VERSION = 1
def copy_to_gcs(file_path, gcs_path):
command = ['gsutil', 'cp', file_path, gcs_path]
try:
output = subprocess.check_output(command)
logging.debug(output)
except subprocess.CalledProcessError as cpe:
logging.error('Error encountered when writing to GCS! %s', cpe)
except Exception as e:
logging.error('Fatal error encountered when shelling command {0}'
.format(command))
logging.error(e)
def write_to_gcs(gcs_path, file_contents):
try:
logging.info(gcs_path)
fd, f_name = tempfile.mkstemp(text=True)
os.write(fd, file_contents)
copy_to_gcs(f_name, gcs_path)
finally:
os.remove(f_name)
def get_file_from_gcs(gcs_file, temp_file):
command = ['gsutil', 'cp', gcs_file, temp_file]
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as e:
logging.error('Error when retrieving file from GCS! {0}'
.format(e.output))
return False
def load_manifest_file():
try:
_, tmp = tempfile.mkstemp(text=True)
command = ['gsutil', 'cp', MANIFEST_FILE, tmp]
subprocess.check_output(command, stderr=subprocess.STDOUT)
with open(tmp) as f:
return yaml.load(f)
except subprocess.CalledProcessError:
logging.info('Manifest file not found in GCS: creating new one.')
return {'schema_version': SCHEMA_VERSION}
finally:
os.remove(tmp)
# 'gsutil ls' would eliminate the try/catch here, but it's eventually
# consistent, while 'gsutil stat' is strongly consistent.
def file_exists(remote_path):
try:
logging.info('Checking file {0}'.format(remote_path))
command = ['gsutil', 'stat', remote_path]
subprocess.check_call(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except subprocess.CalledProcessError:
return False
class Node:
def __init__(self, name, isBuilder, child):
self.name = name
self.isBuilder = isBuilder
self.child = child
def __repr__(self):
return '{0}: {1}|{2}'.format(self.name, self.isBuilder, self.child)
| {
"content_hash": "9719cfa06294956c766c7176c6e8c53b",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.6342042755344418,
"repo_name": "priyawadhwa/runtimes-common",
"id": "32a0c00eba29bfedc86477177ff628b22d64ef3f",
"size": "3139",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "appengine/runtime_builders/builder_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4463"
},
{
"name": "CSS",
"bytes": "1064"
},
{
"name": "Dockerfile",
"bytes": "3885"
},
{
"name": "Go",
"bytes": "194465"
},
{
"name": "HTML",
"bytes": "2736"
},
{
"name": "JavaScript",
"bytes": "4853"
},
{
"name": "Makefile",
"bytes": "18742"
},
{
"name": "PHP",
"bytes": "75349"
},
{
"name": "Python",
"bytes": "325583"
},
{
"name": "Shell",
"bytes": "5562"
},
{
"name": "Vue",
"bytes": "563"
}
],
"symlink_target": ""
} |
"""napalm.eos package."""
from napalm.eos.eos import EOSDriver
__all__ = ("EOSDriver",)
| {
"content_hash": "99e9193e828416594f0d0bcc6e6c115f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 36,
"avg_line_length": 22.25,
"alnum_prop": 0.6629213483146067,
"repo_name": "spotify/napalm",
"id": "032ff1787964bf96a5c79a300bad851c879dd35b",
"size": "714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "napalm/eos/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "349372"
},
{
"name": "Ruby",
"bytes": "2509"
},
{
"name": "Smarty",
"bytes": "696"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/droid/component/shared_droid_motive_system_advanced.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "75ff2c9e55909dd3f296aac806e7da18",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 99,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7073170731707317,
"repo_name": "obi-two/Rebelion",
"id": "875493ec5f5de29cb9ec79a6dc31f8385fd322b6",
"size": "473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/droid/component/shared_droid_motive_system_advanced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.slow
class DeBertaForSequenceClassificationTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
tokenizer = Tokenizer().setInputCols("document").setOutputCol("token")
doc_classifier = DeBertaForSequenceClassification \
.pretrained() \
.setInputCols(["document", "token"]) \
.setOutputCol("class")
pipeline = Pipeline(stages=[
document_assembler,
tokenizer,
doc_classifier
])
model = pipeline.fit(self.data)
model.transform(self.data).show()
| {
"content_hash": "c1861fe01a689d4e23d99df38039f3f9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 109,
"avg_line_length": 28.62162162162162,
"alnum_prop": 0.6336166194523135,
"repo_name": "JohnSnowLabs/spark-nlp",
"id": "7621535fdcdece2cd560392682ba06de40e3f48f",
"size": "1651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/annotator/classifier_dl/deberta_for_sequence_classification_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14452"
},
{
"name": "Java",
"bytes": "223289"
},
{
"name": "Makefile",
"bytes": "819"
},
{
"name": "Python",
"bytes": "1694517"
},
{
"name": "Scala",
"bytes": "4116435"
},
{
"name": "Shell",
"bytes": "5286"
}
],
"symlink_target": ""
} |
from datetime import timedelta, datetime
import dateutil
from django.core.cache import cache
from django.core.urlresolvers import reverse
import operator
import pytz
from casexml.apps.case.models import CommCareCaseGroup
from corehq.apps.groups.models import Group
from corehq.apps.reports import util
from corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher
from corehq.apps.reports.exceptions import BadRequestError
from corehq.apps.reports.fields import FilterUsersField
from corehq.apps.reports.generic import GenericReportView
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.users.models import CommCareUser
from dimagi.utils.dates import DateSpan
from django.utils.translation import ugettext_noop
from dimagi.utils.decorators.memoized import memoized
DATE_FORMAT = "%Y-%m-%d"
class ProjectReport(GenericReportView):
# overriding properties from GenericReportView
section_name = ugettext_noop("Project Reports")
base_template = 'reports/base_template.html'
dispatcher = ProjectReportDispatcher
asynchronous = True
@property
def default_report_url(self):
return reverse('reports_home', args=[self.request.project])
def set_announcements(self):
if self.request.couch_user:
util.set_report_announcements_for_user(self.request, self.request.couch_user)
class CustomProjectReport(ProjectReport):
dispatcher = CustomProjectReportDispatcher
emailable = True
class CommCareUserMemoizer(object):
@memoized
def by_domain(self, domain, is_active=True):
users = CommCareUser.by_domain(domain, is_active=is_active)
for user in users:
# put users in the cache for get_by_user_id
# so that function never has to touch the database
self.get_by_user_id.get_cache(self)[(self, user.user_id)] = user
return users
@memoized
def get_by_user_id(self, user_id):
return CommCareUser.get_by_user_id(user_id)
class ProjectReportParametersMixin(object):
"""
All the parameters necessary for the project reports.
Intended to be mixed in with a GenericReportView object.
"""
default_case_type = None
filter_group_name = None
filter_users_field_class = FilterUsersField
include_inactive = False
# set this to set the report's user ids from within the report
# (i.e. based on a filter's return value).
override_user_ids = None
need_group_ids = False
@property
@memoized
def CommCareUser(self):
return CommCareUserMemoizer()
@memoized
def get_all_users_by_domain(self, group=None, user_ids=None, user_filter=None, simplified=False):
return list(util.get_all_users_by_domain(
domain=self.domain,
group=group,
user_ids=user_ids,
user_filter=user_filter,
simplified=simplified,
CommCareUser=self.CommCareUser
))
@property
@memoized
def user_filter(self):
return self.filter_users_field_class.get_user_filter(self.request)[0]
@property
@memoized
def default_user_filter(self):
return self.filter_users_field_class.get_user_filter(None)[0]
@property
def group_id(self):
return self.group_ids[0] if len(self.group_ids) else ''
@property
@memoized
def group(self):
if self.group_id and self.group_id != '_all':
return Group.get(self.group_id)
else:
return self.groups[0] if len(self.groups) else None
@property
def group_ids(self):
return filter(None, self.request.GET.getlist('group'))
@property
@memoized
def groups(self):
from corehq.apps.groups.models import Group
if '_all' in self.group_ids or self.request.GET.get('all_groups', 'off') == 'on':
return Group.get_reporting_groups(self.domain)
return [Group.get(g) for g in self.group_ids]
@property
def individual(self):
"""
todo: remember this: if self.individual and self.users:
self.name = "%s for %s" % (self.name, self.users[0].get('raw_username'))
"""
return self.request_params.get('individual', '')
@property
def mobile_worker_ids(self):
ids = self.request.GET.getlist('select_mw')
if '_all' in ids or self.request.GET.get('all_mws', 'off') == 'on':
cache_str = "mw_ids:%s" % self.domain
ids = cache.get(cache_str)
if not ids:
cc_users = CommCareUser.by_domain(self.domain)
if self.include_inactive:
cc_users += CommCareUser.by_domain(self.domain, is_active=False)
ids = [ccu._id for ccu in cc_users]
cache.set(cache_str, ids, 24*60*60)
return ids
@property
@memoized
def users(self):
if self.filter_group_name and not (self.group_id or self.individual):
group = Group.by_name(self.domain, self.filter_group_name)
else:
group = self.group
if self.override_user_ids is not None:
user_ids = self.override_user_ids
else:
user_ids = [self.individual]
return self.get_all_users_by_domain(
group=group,
user_ids=tuple(user_ids),
user_filter=tuple(self.user_filter),
simplified=True
)
@property
@memoized
def user_ids(self):
return [user.get('user_id') for user in self.users]
_usernames = None
@property
@memoized
def usernames(self):
return dict([(user.get('user_id'), user.get('username_in_report')) for user in self.users])
@property
@memoized
def users_by_group(self):
user_dict = {}
for group in self.groups:
user_dict["%s|%s" % (group.name, group._id)] = self.get_all_users_by_domain(
group=group,
user_filter=tuple(self.default_user_filter),
simplified=True
)
if self.need_group_ids:
for users in user_dict.values():
for u in users:
u["group_ids"] = Group.by_user(u['user_id'], False)
return user_dict
@property
@memoized
def users_by_mobile_workers(self):
from corehq.apps.reports.util import _report_user_dict
user_dict = {}
for mw in self.mobile_worker_ids:
user_dict[mw] = _report_user_dict(CommCareUser.get_by_user_id(mw))
if self.need_group_ids:
for user in user_dict.values():
user["group_ids"] = Group.by_user(user["user_id"], False)
return user_dict
def get_admins_and_demo_users(self, ufilters=None):
ufilters = ufilters if ufilters is not None else ['1', '2', '3']
users = self.get_all_users_by_domain(
group=None,
user_filter=tuple(HQUserType.use_filter(ufilters)),
simplified=True
) if ufilters else []
if self.need_group_ids:
for u in users:
u["group_ids"] = Group.by_user(u, False)
return users
@property
@memoized
def admins_and_demo_users(self):
ufilters = [uf for uf in ['1', '2', '3'] if uf in self.request.GET.getlist('ufilter')]
users = self.get_admins_and_demo_users(ufilters)
return users
@property
@memoized
def admins_and_demo_user_ids(self):
return [user.get('user_id') for user in self.admins_and_demo_users]
@property
@memoized
def combined_users(self):
#todo: replace users with this and make sure it doesn't break existing reports
all_users = [user for sublist in self.users_by_group.values() for user in sublist]
all_users.extend([user for user in self.users_by_mobile_workers.values()])
all_users.extend([user for user in self.admins_and_demo_users])
return dict([(user['user_id'], user) for user in all_users]).values()
@property
@memoized
def combined_user_ids(self):
return [user.get('user_id') for user in self.combined_users]
@property
@memoized
def case_sharing_groups(self):
        return set(reduce(operator.add, [u['group_ids'] for u in self.combined_users], []))
@property
def history(self):
history = self.request_params.get('history', '')
if history:
try:
return dateutil.parser.parse(history)
except ValueError:
pass
@property
def case_type(self):
return self.default_case_type or self.request_params.get('case_type', '')
@property
def case_status(self):
from corehq.apps.reports.fields import SelectOpenCloseField
return self.request_params.get(SelectOpenCloseField.slug, '')
@property
def case_group_ids(self):
return filter(None, self.request.GET.getlist('case_group'))
@property
@memoized
def case_groups(self):
return [CommCareCaseGroup.get(g) for g in self.case_group_ids]
@property
@memoized
def cases_by_case_group(self):
case_ids = []
for group in self.case_groups:
case_ids.extend(group.cases)
return case_ids
class CouchCachedReportMixin(object):
"""
Use this mixin for caching reports as objects in couch.
"""
_cached_report = None
@property
def cached_report(self):
if not self._cached_report:
self._cached_report = self.fetch_cached_report()
return self._cached_report
def fetch_cached_report(self):
"""
Here's where you generate your cached report.
"""
raise NotImplementedError
class DatespanMixin(object):
"""
Use this where you'd like to include the datespan field.
"""
datespan_field = 'corehq.apps.reports.filters.dates.DatespanFilter'
datespan_default_days = 7
inclusive = True
_datespan = None
@property
def datespan(self):
if self._datespan is None:
datespan = self.default_datespan
if self.request.datespan.is_valid() and not self.request.datespan.is_default:
datespan.enddate = self.request.datespan.enddate
datespan.startdate = self.request.datespan.startdate
datespan.is_default = False
elif self.request.datespan.get_validation_reason() == "You can't use dates earlier than the year 1900":
raise BadRequestError()
self.request.datespan = datespan
# todo: don't update self.context here. find a better place! AGH! Sorry, sorry.
self.context.update(dict(datespan=datespan))
self._datespan = datespan
return self._datespan
@property
def default_datespan(self):
datespan = DateSpan.since(self.datespan_default_days, timezone=self.timezone, inclusive=self.inclusive)
datespan.is_default = True
return datespan
class MonthYearMixin(object):
"""
Similar to DatespanMixin, but works with MonthField and YearField
"""
fields = [MonthFilter, YearFilter]
_datespan = None
@property
def datespan(self):
if self._datespan is None:
datespan = DateSpan.from_month(self.month, self.year)
self.request.datespan = datespan
self.context.update(dict(datespan=datespan))
self._datespan = datespan
return self._datespan
@property
def month(self):
if 'month' in self.request_params:
return int(self.request_params['month'])
else:
return datetime.now().month
@property
def year(self):
if 'year' in self.request_params:
return int(self.request_params['year'])
else:
return datetime.now().year
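# Hedged illustration (not part of the original module): a concrete report in
# this codebase typically composes a report base class with these mixins; the
# class name, display name, slug and filter list below are assumptions used
# only to show the composition pattern.
class _ExampleDatespanReport(ProjectReport, ProjectReportParametersMixin,
                             DatespanMixin):
    name = ugettext_noop("Example Datespan Report")
    slug = "example_datespan_report"
    fields = ['corehq.apps.reports.filters.dates.DatespanFilter']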
| {
"content_hash": "7d0846da7a47a3e9ad812b737eb765a0",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 115,
"avg_line_length": 32.49322493224932,
"alnum_prop": 0.6275229357798165,
"repo_name": "gmimano/commcaretest",
"id": "4bc6860c630df0a1e60d8e882b41deced05db3ef",
"size": "11990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/reports/standard/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
} |
from aleph.model import Document
from aleph.logic.documents import crawl_directory
from aleph.tests.util import TestCase
class IngestTestCase(TestCase):
def setUp(self):
super(IngestTestCase, self).setUp()
self.collection = self.create_collection()
def test_crawl_sample_directory(self):
samples_path = self.get_fixture_path("samples")
crawl_directory(self.collection, samples_path)
assert Document.all().count() == 4, Document.all().count()
| {
"content_hash": "e01c7d3086d88a0b48a335b94c8e4b79",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 35.214285714285715,
"alnum_prop": 0.7079107505070994,
"repo_name": "pudo/aleph",
"id": "3cdc15bd62c05d2a3a6b4e128ddf419425112b6d",
"size": "537",
"binary": false,
"copies": "2",
"ref": "refs/heads/dependabot/pip/develop/jsonschema-4.1.2",
"path": "aleph/tests/test_ingest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15949"
},
{
"name": "HTML",
"bytes": "170476"
},
{
"name": "JavaScript",
"bytes": "111287"
},
{
"name": "Makefile",
"bytes": "1319"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "492593"
}
],
"symlink_target": ""
} |
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_group_page(self):
wd = self.app.wd
if not(wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
# self.app.wait.until(lambda driver: driver.find_element_by_link_text('groups'))
wd.find_element_by_link_text("groups").click()
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()#Среди выбранных чекбоксов выбираем по индексу
def fill_group_form(self, group):
wd = self.app.wd
self.change_field("group_name", group.name)
self.change_field("group_header", group.header)
self.change_field("group_footer", group.footer)
def change_field(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def return_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("groups").click()
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
def create(self, group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
wd.find_element_by_name("submit").click()
self.return_to_group_page()
self.group_cache = None
def delete_first_group(self):
        self.delete_group_by_index(0)  # one method implemented in terms of another
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def edit_first(self, new_group_data):
        self.edit_group_by_index(0, new_group_data)
def edit_group_by_index(self, index,new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache) | {
"content_hash": "c4819f14e8cd04781699c776d7e5be28",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 107,
"avg_line_length": 36.7319587628866,
"alnum_prop": 0.6053887173730003,
"repo_name": "elenagradovich/python_training",
"id": "c214a4fd5b4123d23e4033d6a87e20b85440fd99",
"size": "3636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37676"
}
],
"symlink_target": ""
} |
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
# however the function and object call signatures
# remained the same. This whole try/except block should
# be removed and replaced with a call to six.moves once
# six 1.4.2 is released. See http://bit.ly/1bqrVzu
import xmlrpc.client as xmlrpclib
import six
from kwstandbyclient.openstack.common import gettextutils
from kwstandbyclient.openstack.common import importutils
from kwstandbyclient.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
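# Hedged usage sketch (not part of the original module): plain instances are
# only converted when convert_instances=True; otherwise to_primitive() returns
# the instance unchanged.
def _example_to_primitive():
    class _Point(object):
        def __init__(self):
            self.x, self.y = 1, 2
    return to_primitive(_Point(), convert_instances=True)  # {'x': 1, 'y': 2}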
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| {
"content_hash": "77ebd453e8be77d60bbde3c2745acc22",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 36.16969696969697,
"alnum_prop": 0.6261729222520107,
"repo_name": "frossigneux/python-kwstandbyclient",
"id": "473bfa9247befd08d7b3b5d164ddca13c29738e3",
"size": "6738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kwstandbyclient/openstack/common/jsonutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "142226"
}
],
"symlink_target": ""
} |
"""Query databases for reference metadata."""
import hashlib
import json
import sys
import urllib
import mendeley.models.catalog
import requests
import requests.auth
from .core import Entry
from .rc import rc
def search(query, m_id, m_secret):
mend = mendeley.Mendeley(m_id, m_secret)
session = mend.start_client_credentials_flow().authenticate()
if query.startswith("10.") and "/" in query:
# Interpreting as a DOI
return session.catalog.by_identifier(doi=query, view='bib')
elif query.endswith(".pdf"):
# Interpreting as a file
filehash = sha1hash(query)
try:
return session.catalog.by_identifier(filehash=filehash, view='bib')
except mendeley.exception.MendeleyException:
# Let's not show tracebacks here
sys.tracebacklimit = 0
raise NotImplementedError(
"That file not in Mendeley's catalog. Parsing PDFs for "
"metadata not implemented yet.")
else:
return session.catalog.search(query, view='bib')
def sha1hash(path):
with open(path, 'rb') as f:
sha1 = hashlib.sha1(f.read()).hexdigest()
return sha1
def doc2bib(mdoc, bib=None):
"""Converts a mendeley.CatalogBibView to an Entry."""
assert isinstance(mdoc, mendeley.models.catalog.CatalogBibView)
# Map from Mendeley type to BibTeX type
type2reftype = {
'journal': 'article',
'book': 'book',
'generic': 'misc',
'book_section': 'inbook',
'conference_proceedings': 'inproceedings',
'working_paper': 'unpublished',
'report': 'techreport',
'web_page': 'misc',
'thesis': 'phdthesis',
'magazine_article': 'misc',
'statute': 'misc',
'patent': 'misc',
'newspaper_article': 'misc',
'computer_program': 'misc',
'hearing': 'misc',
'television_broadcast': 'misc',
'encyclopedia_article': 'misc',
'case': 'misc',
'film': 'misc',
'bill': 'misc',
}
key = "%s%s" % (mdoc.authors[0].last_name.lower(), mdoc.year)
entry = Entry(key, bib=bib)
entry.reftype = type2reftype[mdoc.type]
for field in entry.allfields:
if field == 'journal':
val = getattr(mdoc, 'source')
if val is not None:
entry.set(field, val)
if hasattr(mdoc, field):
if field == "type" and entry.reftype != "techreport":
continue
val = getattr(mdoc, field)
if val is not None:
entry.set(field, val)
return entry
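# Hedged usage sketch (not part of the original module): requires Mendeley API
# credentials (m_id, m_secret). It assumes that, for a DOI query, search()
# yields a single catalog document in 'bib' view, which is what doc2bib()'s
# assertion expects.
def _example_doi_lookup(doi, m_id, m_secret):
    mdoc = search(doi, m_id, m_secret)
    return doc2bib(mdoc)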
| {
"content_hash": "fefda0621a06b0803f927f3962b79fc8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 30.488372093023255,
"alnum_prop": 0.5873379099923722,
"repo_name": "tbekolay/refs",
"id": "33d92e2c28882a02cc26f6f55d7c9aed40e72365",
"size": "2622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refs/metadata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65762"
}
],
"symlink_target": ""
} |