diff --git "a/codeparrot-valid_1039.txt" "b/codeparrot-valid_1039.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1039.txt" @@ -0,0 +1,10000 @@ + # get logger + if locked_by is None: + msgPfx = None + else: + msgPfx = 'id={0}'.format(locked_by) + tmpLog = core_utils.make_logger(_logger, msgPfx, method_name='get_jobs_in_sub_status') + tmpLog.debug('start subStatus={0} timeColumn={1}'.format(sub_status, time_column)) + timeNow = datetime.datetime.utcnow() + # sql to count jobs being processed + sqlC = "SELECT COUNT(*) cnt FROM {0} ".format(jobTableName) + sqlC += "WHERE ({0} IS NOT NULL AND subStatus=:subStatus ".format(lock_column) + if time_column is not None and interval_with_lock is not None: + sqlC += "AND ({0} IS NOT NULL AND {0}>:lockTimeLimit) ".format(time_column) + sqlC += ") OR subStatus=:newSubStatus " + # count jobs + if max_jobs > 0 and new_sub_status is not None: + varMap = dict() + varMap[':subStatus'] = sub_status + varMap[':newSubStatus'] = new_sub_status + if time_column is not None and interval_with_lock is not None: + varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock) + self.execute(sqlC, varMap) + nProcessing, = self.cur.fetchone() + if nProcessing >= max_jobs: + # commit + self.commit() + tmpLog.debug('enough jobs {0} are being processed in {1} state'.format(nProcessing, + new_sub_status)) + return [] + max_jobs -= nProcessing + # sql to get job IDs + sql = "SELECT PandaID FROM {0} ".format(jobTableName) + sql += "WHERE subStatus=:subStatus " + if time_column is not None: + sql += "AND ({0} IS NULL ".format(time_column) + if interval_with_lock is not None: + sql += "OR ({0}<:lockTimeLimit AND {1} IS NOT NULL) ".format(time_column, lock_column) + if interval_without_lock is not None: + sql += "OR ({0}<:updateTimeLimit AND {1} IS NULL) ".format(time_column, lock_column) + sql += ') ' + sql += "ORDER BY {0} ".format(time_column) + # sql to lock job + sqlL = "UPDATE {0} SET {1}=:timeNow,{2}=:lockedBy ".format(jobTableName, time_column, lock_column) + sqlL += "WHERE PandaID=:PandaID AND subStatus=:subStatus " + if time_column is not None: + sqlL += "AND ({0} IS NULL ".format(time_column) + if interval_with_lock is not None: + sqlL += "OR ({0}<:lockTimeLimit AND {1} IS NOT NULL) ".format(time_column, lock_column) + if interval_without_lock is not None: + sqlL += "OR ({0}<:updateTimeLimit AND {1} IS NULL) ".format(time_column, lock_column) + sqlL += ') ' + # sql to get jobs + sqlGJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName) + sqlGJ += "WHERE PandaID=:PandaID " + # sql to get file + sqlGF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlGF += "WHERE PandaID=:PandaID AND fileType=:type " + # get jobs + varMap = dict() + varMap[':subStatus'] = sub_status + if interval_with_lock is not None: + varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock) + if interval_without_lock is not None: + varMap[':updateTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_without_lock) + self.execute(sql, varMap) + resList = self.cur.fetchall() + pandaIDs = [] + for pandaID, in resList: + pandaIDs.append(pandaID) + # partially randomise to increase success rate for lock + nJobs = int(max_jobs * 0.2) + subPandaIDs = list(pandaIDs[nJobs:]) + random.shuffle(subPandaIDs) + pandaIDs = pandaIDs[:nJobs] + subPandaIDs + pandaIDs = pandaIDs[:max_jobs] + jobSpecList = [] + for pandaID in pandaIDs: + # lock job + if locked_by is not None: + varMap = dict() + 
varMap[':PandaID'] = pandaID + varMap[':timeNow'] = timeNow + varMap[':lockedBy'] = locked_by + varMap[':subStatus'] = sub_status + if interval_with_lock is not None: + varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock) + if interval_without_lock is not None: + varMap[':updateTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_without_lock) + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + else: + nRow = 1 + if nRow > 0: + # get job + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlGJ, varMap) + resGJ = self.cur.fetchone() + # make job + jobSpec = JobSpec() + jobSpec.pack(resGJ) + if locked_by is not None: + jobSpec.lockedBy = locked_by + setattr(jobSpec, time_column, timeNow) + # get files + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + varMap[':type'] = 'input' + self.execute(sqlGF, varMap) + resGF = self.cur.fetchall() + for resFile in resGF: + fileSpec = FileSpec() + fileSpec.pack(resFile) + jobSpec.add_in_file(fileSpec) + # append + jobSpecList.append(jobSpec) + tmpLog.debug('got {0} jobs'.format(len(jobSpecList))) + return jobSpecList + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # register a worker + def register_worker(self, workspec, jobspec_list, locked_by): + tmpLog = core_utils.make_logger(_logger, 'batchID={0}'.format(workspec.batchID), + method_name='register_worker') + try: + tmpLog.debug('start') + # sql to check if exists + sqlE = "SELECT 1 c FROM {0} WHERE workerID=:workerID ".format(workTableName) + # sql to insert job and worker relationship + sqlR = "INSERT INTO {0} ({1}) ".format(jobWorkerTableName, JobWorkerRelationSpec.column_names()) + sqlR += JobWorkerRelationSpec.bind_values_expression() + # sql to get number of workers + sqlNW = "SELECT DISTINCT t.workerID FROM {0} t, {1} w ".format(jobWorkerTableName, workTableName) + sqlNW += "WHERE t.PandaID=:pandaID AND w.workerID=t.workerID " + sqlNW += "AND w.status IN (:st_submitted,:st_running,:st_idle) " + # sql to decrement nNewWorkers + sqlDN = "UPDATE {0} ".format(pandaQueueTableName) + sqlDN += "SET nNewWorkers=nNewWorkers-1 " + sqlDN += "WHERE queueName=:queueName AND nNewWorkers IS NOT NULL AND nNewWorkers>0 " + # insert worker if new + isNew = False + if workspec.isNew: + varMap = dict() + varMap[':workerID'] = workspec.workerID + self.execute(sqlE, varMap) + resE = self.cur.fetchone() + if resE is None: + isNew = True + if isNew: + # insert a worker + sqlI = "INSERT INTO {0} ({1}) ".format(workTableName, WorkSpec.column_names()) + sqlI += WorkSpec.bind_values_expression() + varMap = workspec.values_list() + self.execute(sqlI, varMap) + # decrement nNewWorkers + varMap = dict() + varMap[':queueName'] = workspec.computingSite + self.execute(sqlDN, varMap) + else: + # not update workerID + workspec.force_not_update('workerID') + # update a worker + sqlU = "UPDATE {0} SET {1} ".format(workTableName, workspec.bind_update_changes_expression()) + sqlU += "WHERE workerID=:workerID " + varMap = workspec.values_map(only_changed=True) + varMap[':workerID'] = workspec.workerID + self.execute(sqlU, varMap) + # collect values to update jobs or insert job/worker mapping + varMapsR = [] + if jobspec_list is not None: + for jobSpec in jobspec_list: + # get number of workers for the job + varMap = dict() + varMap[':pandaID'] = jobSpec.PandaID + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + 
varMap[':st_idle'] = WorkSpec.ST_idle + self.execute(sqlNW, varMap) + resNW = self.cur.fetchall() + workerIDs = set() + workerIDs.add(workspec.workerID) + for tmpWorkerID, in resNW: + workerIDs.add(tmpWorkerID) + # update attributes + if jobSpec.subStatus in ['submitted', 'running']: + jobSpec.nWorkers = len(workerIDs) + try: + jobSpec.nWorkersInTotal += 1 + except Exception: + jobSpec.nWorkersInTotal = jobSpec.nWorkers + elif workspec.hasJob == 1: + if workspec.status == WorkSpec.ST_missed: + # not update if other workers are active + if len(workerIDs) > 1: + continue + core_utils.update_job_attributes_with_workers(workspec.mapType, [jobSpec], + [workspec], {}, {}) + jobSpec.trigger_propagation() + else: + jobSpec.subStatus = 'submitted' + jobSpec.nWorkers = len(workerIDs) + try: + jobSpec.nWorkersInTotal += 1 + except Exception: + jobSpec.nWorkersInTotal = jobSpec.nWorkers + else: + if workspec.status == WorkSpec.ST_missed: + # not update if other workers are active + if len(workerIDs) > 1: + continue + core_utils.update_job_attributes_with_workers(workspec.mapType, [jobSpec], + [workspec], {}, {}) + jobSpec.trigger_propagation() + else: + jobSpec.subStatus = 'queued' + # sql to update job + if len(jobSpec.values_map(only_changed=True)) > 0: + sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobSpec.bind_update_changes_expression()) + sqlJ += "WHERE PandaID=:cr_PandaID AND lockedBy=:cr_lockedBy " + # update job + varMap = jobSpec.values_map(only_changed=True) + varMap[':cr_PandaID'] = jobSpec.PandaID + varMap[':cr_lockedBy'] = locked_by + self.execute(sqlJ, varMap) + if jobSpec.subStatus in ['submitted', 'running']: + # values for job/worker mapping + jwRelation = JobWorkerRelationSpec() + jwRelation.PandaID = jobSpec.PandaID + jwRelation.workerID = workspec.workerID + varMap = jwRelation.values_list() + varMapsR.append(varMap) + # insert job/worker mapping + if len(varMapsR) > 0: + self.executemany(sqlR, varMapsR) + # commit + self.commit() + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return False + + # insert workers + def insert_workers(self, workspec_list, locked_by): + tmpLog = core_utils.make_logger(_logger, 'locked_by={0}'.format(locked_by), + method_name='insert_workers') + try: + tmpLog.debug('start') + timeNow = datetime.datetime.utcnow() + # sql to insert a worker + sqlI = "INSERT INTO {0} ({1}) ".format(workTableName, WorkSpec.column_names()) + sqlI += WorkSpec.bind_values_expression() + for workSpec in workspec_list: + tmpWorkSpec = copy.copy(workSpec) + # insert worker if new + if not tmpWorkSpec.isNew: + continue + tmpWorkSpec.modificationTime = timeNow + tmpWorkSpec.status = WorkSpec.ST_pending + varMap = tmpWorkSpec.values_list() + self.execute(sqlI, varMap) + # commit + self.commit() + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return False + + # get queues to submit workers + def get_queues_to_submit(self, n_queues, lookup_interval, lock_interval, locked_by, queue_lock_interval): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_queues_to_submit') + tmpLog.debug('start') + retMap = dict() + siteName = None + resourceMap = dict() + # sql to get a site + sqlS = "SELECT siteName FROM {0} ".format(pandaQueueTableName) + sqlS += "WHERE submitTime IS NULL " + sqlS += "OR (submitTime<:lockTimeLimit AND lockedBy IS NOT NULL) " + sqlS += "OR 
(submitTime<:lookupTimeLimit AND lockedBy IS NULL) " + sqlS += "ORDER BY submitTime " + # sql to get queues + sqlQ = "SELECT queueName,resourceType,nNewWorkers FROM {0} ".format(pandaQueueTableName) + sqlQ += "WHERE siteName=:siteName " + # sql to get orphaned workers + sqlO = "SELECT workerID FROM {0} ".format(workTableName) + sqlO += "WHERE computingSite=:computingSite " + sqlO += "AND status=:status AND modificationTime<:timeLimit " + # sql to delete orphaned workers. Not to use bulk delete to avoid deadlock with 0-record deletion + sqlD = "DELETE FROM {0} ".format(workTableName) + sqlD += "WHERE workerID=:workerID " + # sql to count nQueue + sqlN = "SELECT status,COUNT(*) cnt FROM {0} ".format(workTableName) + sqlN += "WHERE computingSite=:computingSite " + # sql to count re-fillers + sqlR = "SELECT COUNT(*) cnt FROM {0} ".format(workTableName) + sqlR += "WHERE computingSite=:computingSite AND status=:status " + sqlR += "AND nJobsToReFill IS NOT NULL AND nJobsToReFill>0 " + # sql to update timestamp and lock site + sqlU = "UPDATE {0} SET submitTime=:submitTime,lockedBy=:lockedBy ".format(pandaQueueTableName) + sqlU += "WHERE siteName=:siteName " + sqlU += "AND (submitTime IS NULL OR submitTime<:timeLimit) " + # get sites + timeNow = datetime.datetime.utcnow() + varMap = dict() + varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=queue_lock_interval) + varMap[':lookupTimeLimit'] = timeNow - datetime.timedelta(seconds=lookup_interval) + self.execute(sqlS, varMap) + resS = self.cur.fetchall() + for siteName, in resS: + # update timestamp to lock the site + varMap = dict() + varMap[':siteName'] = siteName + varMap[':submitTime'] = timeNow + varMap[':lockedBy'] = locked_by + varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=lookup_interval) + self.execute(sqlU, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + # skip if not locked + if nRow == 0: + continue + # get queues + varMap = dict() + varMap[':siteName'] = siteName + self.execute(sqlQ, varMap) + resQ = self.cur.fetchall() + for queueName, resourceType, nNewWorkers in resQ: + # delete orphaned workers + varMap = dict() + varMap[':computingSite'] = queueName + varMap[':status'] = WorkSpec.ST_pending + varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=lock_interval) + sqlO_tmp = sqlO + if resourceType != 'ANY': + varMap[':resourceType'] = resourceType + sqlO_tmp += "AND resourceType=:resourceType " + self.execute(sqlO_tmp, varMap) + resO = self.cur.fetchall() + for tmpWorkerID, in resO: + varMap = dict() + varMap[':workerID'] = tmpWorkerID + self.execute(sqlD, varMap) + # commit + self.commit() + # count nQueue + varMap = dict() + varMap[':computingSite'] = queueName + varMap[':resourceType'] = resourceType + sqlN_tmp = sqlN + if resourceType != 'ANY': + varMap[':resourceType'] = resourceType + sqlN_tmp += "AND resourceType=:resourceType " + sqlN_tmp += "GROUP BY status " + self.execute(sqlN_tmp, varMap) + nQueue = 0 + nReady = 0 + nRunning = 0 + for workerStatus, tmpNum in self.cur.fetchall(): + if workerStatus in [WorkSpec.ST_submitted, WorkSpec.ST_pending, WorkSpec.ST_idle]: + nQueue += tmpNum + elif workerStatus in [WorkSpec.ST_ready]: + nReady += tmpNum + elif workerStatus in [WorkSpec.ST_running]: + nRunning += tmpNum + # count nFillers + varMap = dict() + varMap[':computingSite'] = queueName + varMap[':status'] = WorkSpec.ST_running + sqlR_tmp = sqlR + if resourceType != 'ANY': + varMap[':resourceType'] = resourceType + sqlR_tmp += "AND resourceType=:resourceType " + 
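# A minimal sketch of the lock-by-timestamp idiom used throughout this code: claim a row by
# updating its time/owner columns and treat rowcount==0 as "another agent already holds it".
# Assumes a plain sqlite3 connection and hypothetical table/column names, not the harvester
# DBProxy wrapper itself.
import datetime
import sqlite3

def try_lock_site(con, site_name, locked_by, lookup_interval):
    # returns True only if this caller's UPDATE actually changed the row
    time_now = datetime.datetime.utcnow()
    time_limit = time_now - datetime.timedelta(seconds=lookup_interval)
    cur = con.execute(
        "UPDATE pandaQueue SET submitTime=:submitTime,lockedBy=:lockedBy "
        "WHERE siteName=:siteName AND (submitTime IS NULL OR submitTime<:timeLimit) ",
        {'submitTime': time_now, 'lockedBy': locked_by,
         'siteName': site_name, 'timeLimit': time_limit})
    con.commit()
    return cur.rowcount > 0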
self.execute(sqlR_tmp, varMap) + nReFill, = self.cur.fetchone() + nReady += nReFill + # add + retMap.setdefault(queueName, {}) + retMap[queueName][resourceType] = {'nReady': nReady, + 'nRunning': nRunning, + 'nQueue': nQueue, + 'nNewWorkers': nNewWorkers} + resourceMap[resourceType] = queueName + # enough queues + if len(retMap) >= 0: + break + tmpLog.debug('got retMap {0}'.format(str(retMap))) + tmpLog.debug('got siteName {0}'.format(str(siteName))) + tmpLog.debug('got resourceMap {0}'.format(str(resourceMap))) + return retMap, siteName, resourceMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {}, None, {} + + # get job chunks to make workers + def get_job_chunks_for_workers(self, queue_name, n_workers, n_ready, n_jobs_per_worker, n_workers_per_job, + use_job_late_binding, check_interval, lock_interval, locked_by, + allow_job_mixture=False, max_workers_per_job_in_total=None, + max_workers_per_job_per_cycle=None): + toCommit = False + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'queue={0}'.format(queue_name), + method_name='get_job_chunks_for_workers') + tmpLog.debug('start') + # define maxJobs + if n_jobs_per_worker is not None: + maxJobs = (n_workers + n_ready) * n_jobs_per_worker + else: + maxJobs = -(-(n_workers + n_ready) // n_workers_per_job) + # core part of sql + # submitted and running are for multi-workers + sqlCore = "WHERE (subStatus IN (:subStat1,:subStat2) OR (subStatus IN (:subStat3,:subStat4) " + sqlCore += "AND nWorkers IS NOT NULL AND nWorkersLimit IS NOT NULL AND nWorkers1 + nAvailableJobs = None + if n_jobs_per_worker is not None and n_jobs_per_worker > 1: + toCommit = True + # sql to count jobs + sqlC = "SELECT COUNT(*) cnt FROM {0} ".format(jobTableName) + sqlC += sqlCore + # count jobs + varMap = dict() + varMap[':subStat1'] = 'prepared' + varMap[':subStat2'] = 'queued' + varMap[':subStat3'] = 'submitted' + varMap[':subStat4'] = 'running' + varMap[':queueName'] = queue_name + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':checkTimeLimit'] = checkTimeLimit + self.execute(sqlC, varMap) + nAvailableJobs, = self.cur.fetchone() + maxJobs = int(min(maxJobs, nAvailableJobs) / n_jobs_per_worker) * n_jobs_per_worker + tmpStr = 'n_workers={0} n_ready={1} '.format(n_workers, n_ready) + tmpStr += 'n_jobs_per_worker={0} n_workers_per_job={1} '.format(n_jobs_per_worker, n_workers_per_job) + tmpStr += 'n_ava_jobs={0}'.format(nAvailableJobs) + tmpLog.debug(tmpStr) + if maxJobs == 0: + tmpStr = 'skip due to maxJobs=0' + tmpLog.debug(tmpStr) + else: + # get job IDs + varMap = dict() + varMap[':subStat1'] = 'prepared' + varMap[':subStat2'] = 'queued' + varMap[':subStat3'] = 'submitted' + varMap[':subStat4'] = 'running' + varMap[':queueName'] = queue_name + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':checkTimeLimit'] = checkTimeLimit + self.execute(sqlP, varMap) + resP = self.cur.fetchall() + tmpStr = 'fetched {0} jobs'.format(len(resP)) + tmpLog.debug(tmpStr) + jobChunk = [] + iJobs = 0 + for pandaID, in resP: + toCommit = True + toEscape = False + # lock job + varMap = dict() + varMap[':subStat1'] = 'prepared' + varMap[':subStat2'] = 'queued' + varMap[':subStat3'] = 'submitted' + varMap[':subStat4'] = 'running' + varMap[':queueName'] = queue_name + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':checkTimeLimit'] = checkTimeLimit + varMap[':PandaID'] = pandaID + varMap[':timeNow'] = timeNow + varMap[':lockedBy'] = locked_by + self.execute(sqlL, varMap) + nRow = 
self.cur.rowcount + if nRow > 0: + iJobs += 1 + # get job + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlJ, varMap) + resJ = self.cur.fetchone() + # make job + jobSpec = JobSpec() + jobSpec.pack(resJ) + jobSpec.lockedBy = locked_by + # get files + varMap = dict() + varMap[':PandaID'] = pandaID + varMap[':type'] = 'input' + self.execute(sqlGF, varMap) + resGF = self.cur.fetchall() + for resFile in resGF: + fileSpec = FileSpec() + fileSpec.pack(resFile) + jobSpec.add_in_file(fileSpec) + # new chunk + if len(jobChunk) > 0 and jobChunk[0].taskID != jobSpec.taskID and not allow_job_mixture: + tmpLog.debug('new chunk with {0} jobs due to taskID change'.format(len(jobChunk))) + jobChunkList.append(jobChunk) + jobChunk = [] + # only prepared for new worker + if len(jobChunkList) >= n_ready and jobSpec.subStatus == 'queued': + toCommit = False + else: + jobChunk.append(jobSpec) + # enough jobs in chunk + if n_jobs_per_worker is not None and len(jobChunk) >= n_jobs_per_worker: + tmpLog.debug('new chunk with {0} jobs due to n_jobs_per_worker'.format(len(jobChunk))) + jobChunkList.append(jobChunk) + jobChunk = [] + # one job per multiple workers + elif n_workers_per_job is not None: + if jobSpec.nWorkersLimit is None: + jobSpec.nWorkersLimit = n_workers_per_job + if max_workers_per_job_in_total is not None: + jobSpec.maxWorkersInTotal = max_workers_per_job_in_total + nMultiWorkers = min(jobSpec.nWorkersLimit - jobSpec.nWorkers, + n_workers - len(jobChunkList)) + if jobSpec.maxWorkersInTotal is not None and jobSpec.nWorkersInTotal is not None: + nMultiWorkers = min(nMultiWorkers, + jobSpec.maxWorkersInTotal - jobSpec.nWorkersInTotal) + if max_workers_per_job_per_cycle is not None: + nMultiWorkers = min(nMultiWorkers, max_workers_per_job_per_cycle) + if nMultiWorkers < 0: + nMultiWorkers = 0 + tmpLog.debug( + 'new {0} chunks with {1} jobs due to n_workers_per_job'.format(nMultiWorkers, + len(jobChunk))) + for i in range(nMultiWorkers): + jobChunkList.append(jobChunk) + jobChunk = [] + # enough job chunks + if len(jobChunkList) >= n_workers: + toEscape = True + if toCommit: + self.commit() + else: + self.rollback() + if toEscape or iJobs >= maxJobs: + break + tmpLog.debug('got {0} job chunks'.format(len(jobChunkList))) + return jobChunkList + except Exception: + # roll back + if toCommit: + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # get workers to monitor + def get_workers_to_update(self, max_workers, check_interval, lock_interval, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_update') + tmpLog.debug('start') + # sql to get workers + sqlW = "SELECT workerID,configID,mapType FROM {0} ".format(workTableName) + sqlW += "WHERE status IN (:st_submitted,:st_running,:st_idle) " + sqlW += "AND ((modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL) " + sqlW += "OR (modificationTime<:checkTimeLimit AND lockedBy IS NULL)) " + sqlW += "ORDER BY modificationTime LIMIT {0} ".format(max_workers) + # sql to lock worker without time check + sqlL = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(workTableName) + sqlL += "WHERE workerID=:workerID " + # sql to update modificationTime + sqlLM = "UPDATE {0} SET modificationTime=:timeNow ".format(workTableName) + sqlLM += "WHERE workerID=:workerID " + # sql to lock worker with time check + sqlLT = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(workTableName) + sqlLT += "WHERE 
workerID=:workerID " + sqlLT += "AND status IN (:st_submitted,:st_running,:st_idle) " + sqlLT += "AND ((modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL) " + sqlLT += "OR (modificationTime<:checkTimeLimit AND lockedBy IS NULL)) " + # sql to get associated workerIDs + sqlA = "SELECT t.workerID FROM {0} t, {0} s, {1} w ".format(jobWorkerTableName, workTableName) + sqlA += "WHERE s.PandaID=t.PandaID AND s.workerID=:workerID " + sqlA += "AND w.workerID=t.workerID AND w.status IN (:st_submitted,:st_running,:st_idle) " + # sql to get associated workers + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE workerID=:workerID " + # sql to get associated PandaIDs + sqlP = "SELECT PandaID FROM {0} ".format(jobWorkerTableName) + sqlP += "WHERE workerID=:workerID " + # get workerIDs + timeNow = datetime.datetime.utcnow() + lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval) + checkTimeLimit = timeNow - datetime.timedelta(seconds=check_interval) + varMap = dict() + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':checkTimeLimit'] = checkTimeLimit + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + tmpWorkers = set() + for workerID, configID, mapType in resW: + # ignore configID + if not core_utils.dynamic_plugin_change(): + configID = None + tmpWorkers.add((workerID, configID, mapType)) + checkedIDs = set() + retVal = {} + for workerID, configID, mapType in tmpWorkers: + # skip + if workerID in checkedIDs: + continue + # get associated workerIDs + varMap = dict() + varMap[':workerID'] = workerID + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + self.execute(sqlA, varMap) + resA = self.cur.fetchall() + workerIDtoScan = set() + for tmpWorkID, in resA: + workerIDtoScan.add(tmpWorkID) + # add original ID just in case since no relation when job is not yet bound + workerIDtoScan.add(workerID) + # use only the largest worker to avoid updating the same worker set concurrently + if mapType == WorkSpec.MT_MultiWorkers: + if workerID != min(workerIDtoScan): + # update modification time + varMap = dict() + varMap[':workerID'] = workerID + varMap[':timeNow'] = timeNow + self.execute(sqlLM, varMap) + # commit + self.commit() + continue + # lock worker + varMap = dict() + varMap[':workerID'] = workerID + varMap[':lockedBy'] = locked_by + varMap[':timeNow'] = timeNow + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':checkTimeLimit'] = checkTimeLimit + self.execute(sqlLT, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + # skip if not locked + if nRow == 0: + continue + # get workers + queueName = None + workersList = [] + for tmpWorkID in workerIDtoScan: + checkedIDs.add(tmpWorkID) + # get worker + varMap = dict() + varMap[':workerID'] = tmpWorkID + self.execute(sqlG, varMap) + resG = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(resG) + if queueName is None: + queueName = workSpec.computingSite + workersList.append(workSpec) + # get associated PandaIDs + varMap = dict() + varMap[':workerID'] = tmpWorkID + self.execute(sqlP, varMap) + resP = self.cur.fetchall() + workSpec.pandaid_list = [] + for tmpPandaID, in resP: + workSpec.pandaid_list.append(tmpPandaID) 
+ if len(workSpec.pandaid_list) > 0: + workSpec.nJobs = len(workSpec.pandaid_list) + # lock worker + if tmpWorkID != workerID: + varMap = dict() + varMap[':workerID'] = tmpWorkID + varMap[':lockedBy'] = locked_by + varMap[':timeNow'] = timeNow + self.execute(sqlL, varMap) + workSpec.lockedBy = locked_by + workSpec.force_not_update('lockedBy') + # commit + self.commit() + # add + if queueName is not None: + retVal.setdefault(queueName, dict()) + retVal[queueName].setdefault(configID, []) + retVal[queueName][configID].append(workersList) + tmpLog.debug('got {0}'.format(str(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get workers to propagate + def get_workers_to_propagate(self, max_workers, check_interval): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_propagate') + tmpLog.debug('start') + # sql to get worker IDs + sqlW = "SELECT workerID FROM {0} ".format(workTableName) + sqlW += "WHERE lastUpdate IS NOT NULL AND lastUpdate<:checkTimeLimit " + sqlW += "ORDER BY lastUpdate " + # sql to lock worker + sqlL = "UPDATE {0} SET lastUpdate=:timeNow ".format(workTableName) + sqlL += "WHERE lastUpdate IS NOT NULL AND lastUpdate<:checkTimeLimit " + sqlL += "AND workerID=:workerID " + # sql to get associated PandaIDs + sqlA = "SELECT PandaID FROM {0} ".format(jobWorkerTableName) + sqlA += "WHERE workerID=:workerID " + # sql to get workers + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE workerID=:workerID " + timeNow = datetime.datetime.utcnow() + timeLimit = timeNow - datetime.timedelta(seconds=check_interval) + # get workerIDs + varMap = dict() + varMap[':checkTimeLimit'] = timeLimit + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + tmpWorkers = [] + for workerID, in resW: + tmpWorkers.append(workerID) + # partially randomize to increase hit rate + nWorkers = int(max_workers * 0.2) + subTmpWorkers = list(tmpWorkers[nWorkers:]) + random.shuffle(subTmpWorkers) + tmpWorkers = tmpWorkers[:nWorkers] + subTmpWorkers + tmpWorkers = tmpWorkers[:max_workers] + retVal = [] + for workerID in tmpWorkers: + # lock worker + varMap = dict() + varMap[':workerID'] = workerID + varMap[':timeNow'] = timeNow + varMap[':checkTimeLimit'] = timeLimit + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + if nRow > 0: + # get worker + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlG, varMap) + resG = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(resG) + retVal.append(workSpec) + # get associated PandaIDs + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlA, varMap) + resA = self.cur.fetchall() + workSpec.pandaid_list = [] + for pandaID, in resA: + workSpec.pandaid_list.append(pandaID) + # commit + self.commit() + tmpLog.debug('got {0} workers'.format(len(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get workers to feed events + def get_workers_to_feed_events(self, max_workers, lock_interval, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_feed_events') + tmpLog.debug('start') + # sql to get workers + sqlW = "SELECT workerID, status FROM {0} ".format(workTableName) + sqlW += "WHERE eventsRequest=:eventsRequest AND status IN (:status1,:status2) " + sqlW += "AND (eventFeedTime IS NULL 
OR eventFeedTime<:lockTimeLimit) " + sqlW += "ORDER BY eventFeedTime LIMIT {0} ".format(max_workers) + # sql to lock worker + sqlL = "UPDATE {0} SET eventFeedTime=:timeNow,eventFeedLock=:lockedBy ".format(workTableName) + sqlL += "WHERE eventsRequest=:eventsRequest AND status=:status " + sqlL += "AND (eventFeedTime IS NULL OR eventFeedTime<:lockTimeLimit) " + sqlL += "AND workerID=:workerID " + # sql to get associated workers + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE workerID=:workerID " + # get workerIDs + timeNow = datetime.datetime.utcnow() + lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval) + varMap = dict() + varMap[':status1'] = WorkSpec.ST_running + varMap[':status2'] = WorkSpec.ST_submitted + varMap[':eventsRequest'] = WorkSpec.EV_requestEvents + varMap[':lockTimeLimit'] = lockTimeLimit + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + tmpWorkers = dict() + for tmpWorkerID, tmpWorkStatus in resW: + tmpWorkers[tmpWorkerID] = tmpWorkStatus + retVal = {} + for workerID, workStatus in iteritems(tmpWorkers): + # lock worker + varMap = dict() + varMap[':workerID'] = workerID + varMap[':timeNow'] = timeNow + varMap[':status'] = workStatus + varMap[':eventsRequest'] = WorkSpec.EV_requestEvents + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':lockedBy'] = locked_by + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + # skip if not locked + if nRow == 0: + continue + # get worker + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlG, varMap) + resG = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(resG) + if workSpec.computingSite not in retVal: + retVal[workSpec.computingSite] = [] + retVal[workSpec.computingSite].append(workSpec) + tmpLog.debug('got {0} workers'.format(len(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # update jobs and workers + def update_jobs_workers(self, jobspec_list, workspec_list, locked_by, panda_ids_list=None): + try: + timeNow = datetime.datetime.utcnow() + # sql to check job + sqlCJ = "SELECT status FROM {0} WHERE PandaID=:PandaID FOR UPDATE ".format(jobTableName) + # sql to check file + sqlFC = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlFC += "WHERE PandaID=:PandaID AND lfn=:lfn " + # sql to get all LFNs + sqlFL = "SELECT lfn FROM {0} ".format(fileTableName) + sqlFL += "WHERE PandaID=:PandaID AND fileType<>:type " + # sql to check file with eventRangeID + sqlFE = "SELECT 1 c FROM {0} ".format(fileTableName) + sqlFE += "WHERE PandaID=:PandaID AND lfn=:lfn AND eventRangeID=:eventRangeID ".format(fileTableName) + # sql to insert file + sqlFI = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names()) + sqlFI += FileSpec.bind_values_expression() + # sql to get pending files + sqlFP = "SELECT fileID,fsize,lfn FROM {0} ".format(fileTableName) + sqlFP += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type " + # sql to get provenanceID,workerID for pending files + sqlPW = "SELECT SUM(fsize),provenanceID,workerID FROM {0} ".format(fileTableName) + sqlPW += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type " + sqlPW += "GROUP BY provenanceID,workerID " + # sql to update pending files + sqlFU = "UPDATE {0} ".format(fileTableName) + sqlFU += "SET status=:status,zipFileID=:zipFileID " + sqlFU += "WHERE fileID=:fileID " + # sql to check event + 
sqlEC = "SELECT eventRangeID,eventStatus FROM {0} ".format(eventTableName) + sqlEC += "WHERE PandaID=:PandaID AND eventRangeID IS NOT NULL " + # sql to check associated file + sqlEF = "SELECT eventRangeID,status FROM {0} ".format(fileTableName) + sqlEF += "WHERE PandaID=:PandaID AND eventRangeID IS NOT NULL " + # sql to insert event + sqlEI = "INSERT INTO {0} ({1}) ".format(eventTableName, EventSpec.column_names()) + sqlEI += EventSpec.bind_values_expression() + # sql to update event + sqlEU = "UPDATE {0} ".format(eventTableName) + sqlEU += "SET eventStatus=:eventStatus,subStatus=:subStatus " + sqlEU += "WHERE PandaID=:PandaID AND eventRangeID=:eventRangeID " + # sql to check if relationship is already available + sqlCR = "SELECT 1 c FROM {0} WHERE PandaID=:PandaID AND workerID=:workerID ".format(jobWorkerTableName) + # sql to insert job and worker relationship + sqlIR = "INSERT INTO {0} ({1}) ".format(jobWorkerTableName, JobWorkerRelationSpec.column_names()) + sqlIR += JobWorkerRelationSpec.bind_values_expression() + # count number of workers + sqlNW = "SELECT DISTINCT t.workerID FROM {0} t, {1} w ".format(jobWorkerTableName, workTableName) + sqlNW += "WHERE t.PandaID=:PandaID AND w.workerID=t.workerID " + sqlNW += "AND w.status IN (:st_submitted,:st_running,:st_idle) " + # update job + if jobspec_list is not None: + if len(workspec_list) > 0 and workspec_list[0].mapType == WorkSpec.MT_MultiWorkers: + isMultiWorkers = True + else: + isMultiWorkers = False + for jobSpec in jobspec_list: + tmpLog = core_utils.make_logger(_logger, 'PandaID={0} by {1}'.format(jobSpec.PandaID, locked_by), + method_name='update_jobs_workers') + # check job + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + self.execute(sqlCJ, varMap) + resCJ = self.cur.fetchone() + tmpJobStatus, = resCJ + # don't update cancelled jobs + if tmpJobStatus == ['cancelled']: + pass + else: + # get nWorkers + tmpLog.debug('start') + activeWorkers = set() + if isMultiWorkers: + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + self.execute(sqlNW, varMap) + resNW = self.cur.fetchall() + for tmpWorkerID, in resNW: + activeWorkers.add(tmpWorkerID) + jobSpec.nWorkers = len(activeWorkers) + # get all LFNs + allLFNs = set() + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + varMap[':type'] = 'input' + self.execute(sqlFL, varMap) + resFL = self.cur.fetchall() + for tmpLFN, in resFL: + allLFNs.add(tmpLFN) + # insert files + nFiles = 0 + fileIdMap = {} + zipFileRes = dict() + for fileSpec in jobSpec.outFiles: + # insert file + if fileSpec.lfn not in allLFNs: + if jobSpec.zipPerMB is None or fileSpec.isZip in [0, 1]: + fileSpec.status = 'defined' + jobSpec.hasOutFile = JobSpec.HO_hasOutput + else: + fileSpec.status = 'pending' + varMap = fileSpec.values_list() + self.execute(sqlFI, varMap) + fileSpec.fileID = self.cur.lastrowid + nFiles += 1 + # mapping between event range ID and file ID + if fileSpec.eventRangeID is not None: + fileIdMap[fileSpec.eventRangeID] = fileSpec.fileID + # associate to itself + if fileSpec.isZip == 1: + varMap = dict() + varMap[':status'] = fileSpec.status + varMap[':fileID'] = fileSpec.fileID + varMap[':zipFileID'] = fileSpec.fileID + self.execute(sqlFU, varMap) + elif fileSpec.isZip == 1 and fileSpec.eventRangeID is not None: + # add a fake file with eventRangeID which has the same lfn/zipFileID as zip file + varMap = dict() + varMap[':PandaID'] = 
fileSpec.PandaID + varMap[':lfn'] = fileSpec.lfn + varMap[':eventRangeID'] = fileSpec.eventRangeID + self.execute(sqlFE, varMap) + resFE = self.cur.fetchone() + if resFE is None: + if fileSpec.lfn not in zipFileRes: + # get file + varMap = dict() + varMap[':PandaID'] = fileSpec.PandaID + varMap[':lfn'] = fileSpec.lfn + self.execute(sqlFC, varMap) + resFC = self.cur.fetchone() + zipFileRes[fileSpec.lfn] = resFC + # associate to existing zip + resFC = zipFileRes[fileSpec.lfn] + zipFileSpec = FileSpec() + zipFileSpec.pack(resFC) + fileSpec.status = 'zipped' + fileSpec.zipFileID = zipFileSpec.zipFileID + varMap = fileSpec.values_list() + self.execute(sqlFI, varMap) + nFiles += 1 + # mapping between event range ID and file ID + fileIdMap[fileSpec.eventRangeID] = self.cur.lastrowid + if nFiles > 0: + tmpLog.debug('inserted {0} files'.format(nFiles)) + # check pending files + if jobSpec.zipPerMB is not None and \ + not (jobSpec.zipPerMB == 0 and jobSpec.subStatus != 'to_transfer'): + # get workerID and provenanceID of pending files + zippedFileIDs = [] + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + varMap[':status'] = 'pending' + varMap[':type'] = 'input' + self.execute(sqlPW, varMap) + resPW = self.cur.fetchall() + for subTotalSize, tmpProvenanceID, tmpWorkerID in resPW: + if jobSpec.subStatus == 'to_transfer' \ + or (jobSpec.zipPerMB > 0 and subTotalSize > jobSpec.zipPerMB * 1024 * 1024) \ + or (tmpWorkerID is not None and tmpWorkerID not in activeWorkers): + sqlFPx = sqlFP + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + varMap[':status'] = 'pending' + varMap[':type'] = 'input' + if tmpProvenanceID is None: + sqlFPx += 'AND provenanceID IS NULL ' + else: + varMap[':provenanceID'] = tmpProvenanceID + sqlFPx += 'AND provenanceID=:provenanceID ' + if tmpWorkerID is None: + sqlFPx += 'AND workerID IS NULL ' + else: + varMap[':workerID'] = tmpWorkerID + sqlFPx += 'AND workerID=:workerID' + # get pending files + self.execute(sqlFPx, varMap) + resFP = self.cur.fetchall() + tmpLog.debug('got {0} pending files for workerID={1} provenanceID={2}'.format( + len(resFP), + tmpWorkerID, + tmpProvenanceID)) + # make subsets + subTotalSize = 0 + subFileIDs = [] + for tmpFileID, tmpFsize, tmpLFN in resFP: + if jobSpec.zipPerMB > 0 and subTotalSize > 0 \ + and (subTotalSize + tmpFsize > jobSpec.zipPerMB * 1024 * 1024): + zippedFileIDs.append(subFileIDs) + subFileIDs = [] + subTotalSize = 0 + subTotalSize += tmpFsize + subFileIDs.append((tmpFileID, tmpLFN)) + if (jobSpec.subStatus == 'to_transfer' + or (jobSpec.zipPerMB > 0 and subTotalSize > jobSpec.zipPerMB * 1024 * 1024) + or (tmpWorkerID is not None and tmpWorkerID not in activeWorkers)) \ + and len(subFileIDs) > 0: + zippedFileIDs.append(subFileIDs) + # make zip files + for subFileIDs in zippedFileIDs: + # insert zip file + fileSpec = FileSpec() + fileSpec.status = 'zipping' + fileSpec.lfn = 'panda.' 
+ subFileIDs[0][-1] + '.zip' + fileSpec.scope = 'panda' + fileSpec.fileType = 'zip_output' + fileSpec.PandaID = jobSpec.PandaID + fileSpec.taskID = jobSpec.taskID + fileSpec.isZip = 1 + varMap = fileSpec.values_list() + self.execute(sqlFI, varMap) + # update pending files + varMaps = [] + for tmpFileID, tmpLFN in subFileIDs: + varMap = dict() + varMap[':status'] = 'zipped' + varMap[':fileID'] = tmpFileID + varMap[':zipFileID'] = self.cur.lastrowid + varMaps.append(varMap) + self.executemany(sqlFU, varMaps) + # set zip output flag + if len(zippedFileIDs) > 0: + jobSpec.hasOutFile = JobSpec.HO_hasZipOutput + # get event ranges and file stat + eventFileStat = dict() + eventRangesSet = set() + doneEventRangesSet = set() + if len(jobSpec.events) > 0: + # get event ranges + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + self.execute(sqlEC, varMap) + resEC = self.cur.fetchall() + for tmpEventRangeID, tmpEventStatus in resEC: + if tmpEventStatus in ['running']: + eventRangesSet.add(tmpEventRangeID) + else: + doneEventRangesSet.add(tmpEventRangeID) + # check associated file + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + self.execute(sqlEF, varMap) + resEF = self.cur.fetchall() + for tmpEventRangeID, tmpStat in resEF: + eventFileStat[tmpEventRangeID] = tmpStat + # insert or update events + varMapsEI = [] + varMapsEU = [] + for eventSpec in jobSpec.events: + # already done + if eventSpec.eventRangeID in doneEventRangesSet: + continue + # set subStatus + if eventSpec.eventStatus == 'finished': + # check associated file + if eventSpec.eventRangeID not in eventFileStat or \ + eventFileStat[eventSpec.eventRangeID] == 'finished': + eventSpec.subStatus = 'finished' + elif eventFileStat[eventSpec.eventRangeID] == 'failed': + eventSpec.eventStatus = 'failed' + eventSpec.subStatus = 'failed' + else: + eventSpec.subStatus = 'transferring' + else: + eventSpec.subStatus = eventSpec.eventStatus + # set fileID + if eventSpec.eventRangeID in fileIdMap: + eventSpec.fileID = fileIdMap[eventSpec.eventRangeID] + # insert or update event + if eventSpec.eventRangeID not in eventRangesSet: + varMap = eventSpec.values_list() + varMapsEI.append(varMap) + else: + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + varMap[':eventRangeID'] = eventSpec.eventRangeID + varMap[':eventStatus'] = eventSpec.eventStatus + varMap[':subStatus'] = eventSpec.subStatus + varMapsEU.append(varMap) + if len(varMapsEI) > 0: + self.executemany(sqlEI, varMapsEI) + tmpLog.debug('inserted {0} event'.format(len(varMapsEI))) + if len(varMapsEU) > 0: + self.executemany(sqlEU, varMapsEU) + tmpLog.debug('updated {0} event'.format(len(varMapsEU))) + # update job + varMap = jobSpec.values_map(only_changed=True) + if len(varMap) > 0: + tmpLog.debug('update job') + # sql to update job + sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobSpec.bind_update_changes_expression()) + sqlJ += "WHERE PandaID=:PandaID " + jobSpec.lockedBy = None + jobSpec.modificationTime = timeNow + varMap = jobSpec.values_map(only_changed=True) + varMap[':PandaID'] = jobSpec.PandaID + self.execute(sqlJ, varMap) + nRow = self.cur.rowcount + tmpLog.debug('done with {0}'.format(nRow)) + tmpLog.debug('all done for job') + # commit + self.commit() + # update worker + retVal = True + for idxW, workSpec in enumerate(workspec_list): + tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(workSpec.workerID), + method_name='update_jobs_workers') + tmpLog.debug('update worker') + workSpec.lockedBy = None + if workSpec.status == WorkSpec.ST_running and 
workSpec.startTime is None: + workSpec.startTime = timeNow + elif workSpec.is_final_status(): + if workSpec.startTime is None: + workSpec.startTime = timeNow + if workSpec.endTime is None: + workSpec.endTime = timeNow + if not workSpec.nextLookup: + if workSpec.has_updated_attributes(): + workSpec.modificationTime = timeNow + else: + workSpec.nextLookup = False + # sql to update worker + sqlW = "UPDATE {0} SET {1} ".format(workTableName, workSpec.bind_update_changes_expression()) + sqlW += "WHERE workerID=:workerID AND lockedBy=:cr_lockedBy " + sqlW += "AND (status NOT IN (:st1,:st2,:st3,:st4)) " + varMap = workSpec.values_map(only_changed=True) + if len(varMap) > 0: + varMap[':workerID'] = workSpec.workerID + varMap[':cr_lockedBy'] = locked_by + varMap[':st1'] = WorkSpec.ST_cancelled + varMap[':st2'] = WorkSpec.ST_finished + varMap[':st3'] = WorkSpec.ST_failed + varMap[':st4'] = WorkSpec.ST_missed + self.execute(sqlW, varMap) + nRow = self.cur.rowcount + tmpLog.debug('done with {0}'.format(nRow)) + if nRow == 0: + retVal = False + # insert relationship if necessary + if panda_ids_list is not None and len(panda_ids_list) > idxW: + varMapsIR = [] + for pandaID in panda_ids_list[idxW]: + varMap = dict() + varMap[':PandaID'] = pandaID + varMap[':workerID'] = workSpec.workerID + self.execute(sqlCR, varMap) + resCR = self.cur.fetchone() + if resCR is None: + jwRelation = JobWorkerRelationSpec() + jwRelation.PandaID = pandaID + jwRelation.workerID = workSpec.workerID + varMap = jwRelation.values_list() + varMapsIR.append(varMap) + if len(varMapsIR) > 0: + self.executemany(sqlIR, varMapsIR) + tmpLog.debug('all done for worker') + # commit + self.commit() + # return + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get jobs with workerID + def get_jobs_with_worker_id(self, worker_id, locked_by, with_file=False, only_running=False, slim=False): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id), + method_name='get_jobs_with_worker_id') + tmpLog.debug('start') + # sql to get PandaIDs + sqlP = "SELECT PandaID FROM {0} ".format(jobWorkerTableName) + sqlP += "WHERE workerID=:workerID " + # sql to get jobs + sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(slim=slim), jobTableName) + sqlJ += "WHERE PandaID=:PandaID " + # sql to get job parameters + sqlJJ = "SELECT jobParams FROM {0} ".format(jobTableName) + sqlJJ += "WHERE PandaID=:PandaID " + # sql to lock job + sqlL = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(jobTableName) + sqlL += "WHERE PandaID=:PandaID " + # sql to get files + sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlF += "WHERE PandaID=:PandaID AND zipFileID IS NULL " + # get jobs + jobChunkList = [] + timeNow = datetime.datetime.utcnow() + varMap = dict() + varMap[':workerID'] = worker_id + self.execute(sqlP, varMap) + resW = self.cur.fetchall() + for pandaID, in resW: + # get job + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlJ, varMap) + resJ = self.cur.fetchone() + # make job + jobSpec = JobSpec() + jobSpec.pack(resJ, slim=slim) + if only_running and jobSpec.subStatus not in ['running', 'submitted', 'queued', 'idle']: + continue + jobSpec.lockedBy = locked_by + # for old jobs without extractions + if jobSpec.jobParamsExtForLog is None: + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlJJ, varMap) + resJJ = self.cur.fetchone() + 
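# A minimal sketch of the executemany() batching used above for bulk writes such as the
# job/worker relation and event rows: build one bind-variable dict per row, then send the
# whole list in a single call instead of looping over execute(). Assumes sqlite3 and a
# hypothetical jw_table.
import sqlite3

def insert_job_worker_relations(con, panda_ids, worker_id):
    var_maps = [{'PandaID': panda_id, 'workerID': worker_id} for panda_id in panda_ids]
    if var_maps:
        con.executemany(
            "INSERT INTO jw_table (PandaID,workerID) VALUES (:PandaID,:workerID) ",
            var_maps)
        con.commit()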
jobSpec.set_blob_attribute('jobParams', resJJ[0]) + jobSpec.get_output_file_attributes() + jobSpec.get_logfile_info() + # lock job + if locked_by is not None: + varMap = dict() + varMap[':PandaID'] = pandaID + varMap[':lockedBy'] = locked_by + varMap[':timeNow'] = timeNow + self.execute(sqlL, varMap) + # get files + if with_file: + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlF, varMap) + resFileList = self.cur.fetchall() + for resFile in resFileList: + fileSpec = FileSpec() + fileSpec.pack(resFile) + jobSpec.add_file(fileSpec) + # append + jobChunkList.append(jobSpec) + # commit + self.commit() + tmpLog.debug('got {0} job chunks'.format(len(jobChunkList))) + return jobChunkList + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # get ready workers + def get_ready_workers(self, queue_name, n_ready): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'queue={0}'.format(queue_name), + method_name='get_ready_workers') + tmpLog.debug('start') + # sql to get workers + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE computingSite=:queueName AND (status=:status_ready OR (status=:status_running " + sqlG += "AND nJobsToReFill IS NOT NULL AND nJobsToReFill>0)) " + sqlG += "ORDER BY modificationTime LIMIT {0} ".format(n_ready) + # sql to get associated PandaIDs + sqlP = "SELECT COUNT(*) cnt FROM {0} ".format(jobWorkerTableName) + sqlP += "WHERE workerID=:workerID " + # get workers + varMap = dict() + varMap[':status_ready'] = WorkSpec.ST_ready + varMap[':status_running'] = WorkSpec.ST_running + varMap[':queueName'] = queue_name + self.execute(sqlG, varMap) + resList = self.cur.fetchall() + retVal = [] + for res in resList: + workSpec = WorkSpec() + workSpec.pack(res) + # get number of jobs + varMap = dict() + varMap[':workerID'] = workSpec.workerID + self.execute(sqlP, varMap) + resP = self.cur.fetchone() + if resP is not None and resP[0] > 0: + workSpec.nJobs = resP[0] + retVal.append(workSpec) + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # get a worker + def get_worker_with_id(self, worker_id): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id), + method_name='get_worker_with_id') + tmpLog.debug('start') + # sql to get a worker + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE workerID=:workerID " + # get a worker + varMap = dict() + varMap[':workerID'] = worker_id + self.execute(sqlG, varMap) + res = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(res) + # commit + self.commit() + tmpLog.debug('got') + return workSpec + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # get jobs to trigger or check output transfer or zip output + def get_jobs_for_stage_out(self, max_jobs, interval_without_lock, interval_with_lock, locked_by, + sub_status, has_out_file_flag, bad_has_out_file_flag=None, + max_files_per_job=None): + try: + # get logger + msgPfx = 'thr={0}'.format(locked_by) + tmpLog = core_utils.make_logger(_logger, msgPfx, method_name='get_jobs_for_stage_out') + tmpLog.debug('start') + # sql to get PandaIDs without FOR UPDATE which causes deadlock in MariaDB + sql = "SELECT 
PandaID FROM {0} ".format(jobTableName) + sql += "WHERE " + sql += "(subStatus=:subStatus OR hasOutFile=:hasOutFile) " + if bad_has_out_file_flag is not None: + sql += "AND (hasOutFile IS NULL OR hasOutFile<>:badHasOutFile) " + sql += "AND (stagerTime IS NULL " + sql += "OR (stagerTime<:lockTimeLimit AND stagerLock IS NOT NULL) " + sql += "OR (stagerTime<:updateTimeLimit AND stagerLock IS NULL) " + sql += ") " + sql += "ORDER BY stagerTime " + sql += "LIMIT {0} ".format(max_jobs) + # sql to lock job + sqlL = "UPDATE {0} SET stagerTime=:timeNow,stagerLock=:lockedBy ".format(jobTableName) + sqlL += "WHERE PandaID=:PandaID AND " + sqlL += "(subStatus=:subStatus OR hasOutFile=:hasOutFile) " + if bad_has_out_file_flag is not None: + sqlL += "AND (hasOutFile IS NULL OR hasOutFile<>:badHasOutFile) " + sqlL += "AND (stagerTime IS NULL " + sqlL += "OR (stagerTime<:lockTimeLimit AND stagerLock IS NOT NULL) " + sqlL += "OR (stagerTime<:updateTimeLimit AND stagerLock IS NULL) " + sqlL += ") " + # sql to get job + sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(slim=True), jobTableName) + sqlJ += "WHERE PandaID=:PandaID " + # sql to get job parameters + sqlJJ = "SELECT jobParams FROM {0} ".format(jobTableName) + sqlJJ += "WHERE PandaID=:PandaID " + # sql to get files + sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlF += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type " + if max_files_per_job is not None and max_files_per_job > 0: + sqlF += "LIMIT {0} ".format(max_files_per_job) + # sql to get associated files + sqlAF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlAF += "WHERE PandaID=:PandaID AND zipFileID=:zipFileID AND fileType<>:type " + # sql to increment attempt number + sqlFU = "UPDATE {0} SET attemptNr=attemptNr+1 WHERE fileID=:fileID ".format(fileTableName) + # get jobs + timeNow = datetime.datetime.utcnow() + lockTimeLimit = timeNow - datetime.timedelta(seconds=interval_with_lock) + updateTimeLimit = timeNow - datetime.timedelta(seconds=interval_without_lock) + varMap = dict() + varMap[':subStatus'] = sub_status + varMap[':hasOutFile'] = has_out_file_flag + if bad_has_out_file_flag is not None: + varMap[':badHasOutFile'] = bad_has_out_file_flag + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':updateTimeLimit'] = updateTimeLimit + self.execute(sql, varMap) + resList = self.cur.fetchall() + jobSpecList = [] + for pandaID, in resList: + # lock job + varMap = dict() + varMap[':PandaID'] = pandaID + varMap[':timeNow'] = timeNow + varMap[':lockedBy'] = locked_by + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':updateTimeLimit'] = updateTimeLimit + varMap[':subStatus'] = sub_status + varMap[':hasOutFile'] = has_out_file_flag + if bad_has_out_file_flag is not None: + varMap[':badHasOutFile'] = bad_has_out_file_flag + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + if nRow > 0: + # get job + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlJ, varMap) + resJ = self.cur.fetchone() + # make job + jobSpec = JobSpec() + jobSpec.pack(resJ, slim=True) + jobSpec.stagerLock = locked_by + jobSpec.stagerTime = timeNow + # for old jobs without extractions + if jobSpec.jobParamsExtForLog is None: + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlJJ, varMap) + resJJ = self.cur.fetchone() + jobSpec.set_blob_attribute('jobParams', resJJ[0]) + jobSpec.get_output_file_attributes() + jobSpec.get_logfile_info() + # get files + varMap = dict() + 
varMap[':PandaID'] = jobSpec.PandaID + varMap[':type'] = 'input' + if has_out_file_flag == JobSpec.HO_hasOutput: + varMap[':status'] = 'defined' + elif has_out_file_flag == JobSpec.HO_hasZipOutput: + varMap[':status'] = 'zipping' + else: + varMap[':status'] = 'transferring' + self.execute(sqlF, varMap) + resFileList = self.cur.fetchall() + for resFile in resFileList: + fileSpec = FileSpec() + fileSpec.pack(resFile) + fileSpec.attemptNr += 1 + jobSpec.add_out_file(fileSpec) + # increment attempt number + varMap = dict() + varMap[':fileID'] = fileSpec.fileID + self.execute(sqlFU, varMap) + jobSpecList.append(jobSpec) + # commit + if len(resFileList) > 0: + self.commit() + # get associated files + if has_out_file_flag == JobSpec.HO_hasZipOutput: + for fileSpec in jobSpec.outFiles: + varMap = dict() + varMap[':PandaID'] = fileSpec.PandaID + varMap[':zipFileID'] = fileSpec.fileID + varMap[':type'] = 'input' + self.execute(sqlAF, varMap) + resAFs = self.cur.fetchall() + for resAF in resAFs: + assFileSpec = FileSpec() + assFileSpec.pack(resAF) + fileSpec.add_associated_file(assFileSpec) + # get associated workers + tmpWorkers = self.get_workers_with_job_id(jobSpec.PandaID, use_commit=False) + jobSpec.add_workspec_list(tmpWorkers) + tmpLog.debug('got {0} jobs'.format(len(jobSpecList))) + return jobSpecList + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # update job for stage-out + def update_job_for_stage_out(self, jobspec, update_event_status, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, + 'PandaID={0} subStatus={1} thr={2}'.format(jobspec.PandaID, + jobspec.subStatus, + locked_by), + method_name='update_job_for_stage_out') + tmpLog.debug('start') + # sql to update event + sqlEU = "UPDATE {0} ".format(eventTableName) + sqlEU += "SET eventStatus=:eventStatus,subStatus=:subStatus " + sqlEU += "WHERE eventRangeID=:eventRangeID " + sqlEU += "AND eventStatus<>:statusFailed AND subStatus<>:statusDone " + # sql to update associated events + sqlAE = "UPDATE {0} ".format(eventTableName) + sqlAE += "SET eventStatus=:eventStatus,subStatus=:subStatus " + sqlAE += "WHERE eventRangeID IN " + sqlAE += "(SELECT eventRangeID FROM {0} ".format(fileTableName) + sqlAE += "WHERE PandaID=:PandaID AND zipFileID=:zipFileID) " + sqlAE += "AND eventStatus<>:statusFailed AND subStatus<>:statusDone " + # sql to lock job again + sqlLJ = "UPDATE {0} SET stagerTime=:timeNow ".format(jobTableName) + sqlLJ += "WHERE PandaID=:PandaID AND stagerLock=:lockedBy " + # sql to check lock + sqlLC = "SELECT stagerLock FROM {0} ".format(jobTableName) + sqlLC += "WHERE PandaID=:PandaID " + # lock + varMap = dict() + varMap[':PandaID'] = jobspec.PandaID + varMap[':lockedBy'] = locked_by + varMap[':timeNow'] = datetime.datetime.utcnow() + self.execute(sqlLJ, varMap) + nRow = self.cur.rowcount + # check just in case since nRow can be 0 if two lock actions are too close in time + if nRow == 0: + varMap = dict() + varMap[':PandaID'] = jobspec.PandaID + self.execute(sqlLC, varMap) + resLC = self.cur.fetchone() + if resLC is not None and resLC[0] == locked_by: + nRow = 1 + # commit + self.commit() + if nRow == 0: + tmpLog.debug('skip since locked by another') + return None + # update files + tmpLog.debug('update {0} files'.format(len(jobspec.outFiles))) + for fileSpec in jobspec.outFiles: + # sql to update file + sqlF = "UPDATE {0} SET {1} ".format(fileTableName, fileSpec.bind_update_changes_expression()) + sqlF += "WHERE 
PandaID=:PandaID AND fileID=:fileID " + varMap = fileSpec.values_map(only_changed=True) + updated = False + if len(varMap) > 0: + varMap[':PandaID'] = fileSpec.PandaID + varMap[':fileID'] = fileSpec.fileID + self.execute(sqlF, varMap) + updated = True + # update event status + if update_event_status: + if fileSpec.eventRangeID is not None: + varMap = dict() + varMap[':eventRangeID'] = fileSpec.eventRangeID + varMap[':eventStatus'] = fileSpec.status + varMap[':subStatus'] = fileSpec.status + varMap[':statusFailed'] = 'failed' + varMap[':statusDone'] = 'done' + self.execute(sqlEU, varMap) + updated = True + if fileSpec.isZip == 1: + # update files associated with zip file + varMap = dict() + varMap[':PandaID'] = fileSpec.PandaID + varMap[':zipFileID'] = fileSpec.fileID + varMap[':eventStatus'] = fileSpec.status + varMap[':subStatus'] = fileSpec.status + varMap[':statusFailed'] = 'failed' + varMap[':statusDone'] = 'done' + self.execute(sqlAE, varMap) + updated = True + nRow = self.cur.rowcount + tmpLog.debug('updated {0} events'.format(nRow)) + if updated: + # lock job again + varMap = dict() + varMap[':PandaID'] = jobspec.PandaID + varMap[':lockedBy'] = locked_by + varMap[':timeNow'] = datetime.datetime.utcnow() + self.execute(sqlLJ, varMap) + # commit + self.commit() + nRow = self.cur.rowcount + if nRow == 0: + tmpLog.debug('skip since locked by another') + return None + # count files + sqlC = "SELECT COUNT(*) cnt,status FROM {0} ".format(fileTableName) + sqlC += "WHERE PandaID=:PandaID GROUP BY status " + varMap = dict() + varMap[':PandaID'] = jobspec.PandaID + self.execute(sqlC, varMap) + resC = self.cur.fetchall() + cntMap = {} + for cnt, fileStatus in resC: + cntMap[fileStatus] = cnt + # set job attributes + jobspec.stagerLock = None + if 'zipping' in cntMap: + jobspec.hasOutFile = JobSpec.HO_hasZipOutput + elif 'defined' in cntMap: + jobspec.hasOutFile = JobSpec.HO_hasOutput + elif 'transferring' in cntMap: + jobspec.hasOutFile = JobSpec.HO_hasTransfer + else: + jobspec.hasOutFile = JobSpec.HO_noOutput + if jobspec.subStatus == 'to_transfer': + # change subStatus when no more files to trigger transfer + if jobspec.hasOutFile not in [JobSpec.HO_hasOutput, JobSpec.HO_hasZipOutput]: + jobspec.subStatus = 'transferring' + jobspec.stagerTime = None + elif jobspec.subStatus == 'transferring': + # all done + if jobspec.hasOutFile == JobSpec.HO_noOutput: + jobspec.trigger_propagation() + if 'failed' in cntMap: + jobspec.status = 'failed' + jobspec.subStatus = 'failed_to_stage_out' + else: + jobspec.subStatus = 'staged' + # get finished files + jobspec.reset_out_file() + sqlFF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlFF += "WHERE PandaID=:PandaID AND status=:status AND fileType IN (:type1,:type2) " + varMap = dict() + varMap[':PandaID'] = jobspec.PandaID + varMap[':status'] = 'finished' + varMap[':type1'] = 'output' + varMap[':type2'] = 'log' + self.execute(sqlFF, varMap) + resFileList = self.cur.fetchall() + for resFile in resFileList: + fileSpec = FileSpec() + fileSpec.pack(resFile) + jobspec.add_out_file(fileSpec) + # make file report + jobspec.outputFilesToReport = core_utils.get_output_file_report(jobspec) + # sql to update job + sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobspec.bind_update_changes_expression()) + sqlJ += "WHERE PandaID=:PandaID AND stagerLock=:lockedBy " + # update job + varMap = jobspec.values_map(only_changed=True) + varMap[':PandaID'] = jobspec.PandaID + varMap[':lockedBy'] = locked_by + self.execute(sqlJ, varMap) + # commit 
+ self.commit() + tmpLog.debug('done') + # return + return jobspec.subStatus + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # add a seq number + def add_seq_number(self, number_name, init_value): + try: + # check if already there + sqlC = "SELECT curVal FROM {0} WHERE numberName=:numberName ".format(seqNumberTableName) + varMap = dict() + varMap[':numberName'] = number_name + self.execute(sqlC, varMap) + res = self.cur.fetchone() + # insert if missing + if res is None: + # make spec + seqNumberSpec = SeqNumberSpec() + seqNumberSpec.numberName = number_name + seqNumberSpec.curVal = init_value + # insert + sqlI = "INSERT INTO {0} ({1}) ".format(seqNumberTableName, SeqNumberSpec.column_names()) + sqlI += SeqNumberSpec.bind_values_expression() + varMap = seqNumberSpec.values_list() + self.execute(sqlI, varMap) + # commit + self.commit() + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get next value for a seq number + def get_next_seq_number(self, number_name): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'name={0}'.format(number_name), + method_name='get_next_seq_number') + # increment + sqlU = "UPDATE {0} SET curVal=curVal+1 WHERE numberName=:numberName ".format(seqNumberTableName) + varMap = dict() + varMap[':numberName'] = number_name + self.execute(sqlU, varMap) + # get + sqlG = "SELECT curVal FROM {0} WHERE numberName=:numberName ".format(seqNumberTableName) + varMap = dict() + varMap[':numberName'] = number_name + self.execute(sqlG, varMap) + retVal, = self.cur.fetchone() + # commit + self.commit() + tmpLog.debug('got {0}'.format(retVal)) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # get last update time for a cached info + def get_cache_last_update_time(self, main_key, sub_key): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key), + method_name='get_cache_last_update_time') + # get + varMap = dict() + varMap[":mainKey"] = main_key + sqlU = "SELECT lastUpdate FROM {0} WHERE mainKey=:mainKey ".format(cacheTableName) + if sub_key is not None: + sqlU += "AND subKey=:subKey " + varMap[":subKey"] = sub_key + self.execute(sqlU, varMap) + retVal = self.cur.fetchone() + if retVal is not None: + retVal, = retVal + # commit + self.commit() + tmpLog.debug('got {0}'.format(retVal)) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # refresh a cached info + def refresh_cache(self, main_key, sub_key, new_info): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key), + method_name='refresh_cache') + # make spec + cacheSpec = CacheSpec() + cacheSpec.lastUpdate = datetime.datetime.utcnow() + cacheSpec.data = new_info + # check if already there + varMap = dict() + varMap[":mainKey"] = main_key + sqlC = "SELECT lastUpdate FROM {0} WHERE mainKey=:mainKey ".format(cacheTableName) + if sub_key is not None: + sqlC += "AND subKey=:subKey " + varMap[":subKey"] = sub_key + self.execute(sqlC, varMap) + retC = self.cur.fetchone() + if retC is None: + # insert if missing + cacheSpec.mainKey = main_key + cacheSpec.subKey = sub_key + sqlU = "INSERT INTO {0} ({1}) 
".format(cacheTableName, CacheSpec.column_names()) + sqlU += CacheSpec.bind_values_expression() + varMap = cacheSpec.values_list() + else: + # update + sqlU = "UPDATE {0} SET {1} ".format(cacheTableName, cacheSpec.bind_update_changes_expression()) + sqlU += "WHERE mainKey=:mainKey " + varMap = cacheSpec.values_map(only_changed=True) + varMap[":mainKey"] = main_key + if sub_key is not None: + sqlU += "AND subKey=:subKey " + varMap[":subKey"] = sub_key + self.execute(sqlU, varMap) + # commit + self.commit() + # put into global dict + cacheKey = 'cache|{0}|{1}'.format(main_key, sub_key) + globalDict = core_utils.get_global_dict() + globalDict.acquire() + globalDict[cacheKey] = cacheSpec.data + globalDict.release() + tmpLog.debug('refreshed') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get a cached info + def get_cache(self, main_key, sub_key=None): + useDB = False + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key), + method_name='get_cache') + tmpLog.debug('start') + # get from global dict + cacheKey = 'cache|{0}|{1}'.format(main_key, sub_key) + globalDict = core_utils.get_global_dict() + # lock dict + globalDict.acquire() + # found + if cacheKey in globalDict: + # release dict + globalDict.release() + # make spec + cacheSpec = CacheSpec() + cacheSpec.data = globalDict[cacheKey] + else: + # read from database + useDB = True + sql = "SELECT {0} FROM {1} ".format(CacheSpec.column_names(), cacheTableName) + sql += "WHERE mainKey=:mainKey " + varMap = dict() + varMap[":mainKey"] = main_key + if sub_key is not None: + sql += "AND subKey=:subKey " + varMap[":subKey"] = sub_key + self.execute(sql, varMap) + resJ = self.cur.fetchone() + # commit + self.commit() + if resJ is None: + # release dict + globalDict.release() + return None + # make spec + cacheSpec = CacheSpec() + cacheSpec.pack(resJ) + # put into global dict + globalDict[cacheKey] = cacheSpec.data + # release dict + globalDict.release() + tmpLog.debug('done') + # return + return cacheSpec + except Exception: + if useDB: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # store commands + def store_commands(self, command_specs): + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='store_commands') + tmpLog.debug('{0} commands'.format(len(command_specs))) + if not command_specs: + return True + try: + # sql to insert a command + sql = "INSERT INTO {0} ({1}) ".format(commandTableName, CommandSpec.column_names()) + sql += CommandSpec.bind_values_expression() + # loop over all commands + var_maps = [] + for command_spec in command_specs: + var_map = command_spec.values_list() + var_maps.append(var_map) + # insert + self.executemany(sql, var_maps) + # commit + self.commit() + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return False + + # get commands for a receiver + def get_commands_for_receiver(self, receiver, command_pattern=None): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_commands_for_receiver') + tmpLog.debug('start') + # sql to get commands + varMap = dict() + varMap[':receiver'] = receiver + varMap[':processed'] = 0 + sqlG = "SELECT {0} FROM {1} ".format(CommandSpec.column_names(), commandTableName) + sqlG += "WHERE receiver=:receiver AND 
processed=:processed " + if command_pattern is not None: + varMap[':command'] = command_pattern + if '%' in command_pattern: + sqlG += "AND command LIKE :command " + else: + sqlG += "AND command=:command " + sqlG += "FOR UPDATE " + # sql to lock command + sqlL = "UPDATE {0} SET processed=:processed WHERE command_id=:command_id ".format(commandTableName) + self.execute(sqlG, varMap) + commandSpecList = [] + for res in self.cur.fetchall(): + # make command + commandSpec = CommandSpec() + commandSpec.pack(res) + # lock + varMap = dict() + varMap[':command_id'] = commandSpec.command_id + varMap[':processed'] = 1 + self.execute(sqlL, varMap) + # append + commandSpecList.append(commandSpec) + # commit + self.commit() + tmpLog.debug('got {0} commands'.format(len(commandSpecList))) + return commandSpecList + except Exception: + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # get command ids that have been processed and need to be acknowledged to panda server + def get_commands_ack(self): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_commands_ack') + tmpLog.debug('start') + # sql to get commands that have been processed and need acknowledgement + sql = """ + SELECT command_id FROM {0} + WHERE ack_requested=1 + AND processed=1 + """.format(commandTableName) + self.execute(sql) + command_ids = [row[0] for row in self.cur.fetchall()] + tmpLog.debug('command_ids {0}'.format(command_ids)) + return command_ids + except Exception: + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + def clean_commands_by_id(self, commands_ids): + """ + Deletes the commands specified in a list of IDs + """ + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='clean_commands_by_id') + try: + # sql to delete a specific command + sql = """ + DELETE FROM {0} + WHERE command_id=:command_id""".format(commandTableName) + + for command_id in commands_ids: + var_map = {':command_id': command_id} + self.execute(sql, var_map) + self.commit() + return True + except Exception: + self.rollback() + core_utils.dump_error_message(tmpLog) + return False + + def clean_processed_commands(self): + """ + Deletes the commands that have been processed and do not need acknowledgement + """ + tmpLog = core_utils.make_logger(_logger, method_name='clean_processed_commands') + try: + # sql to delete all processed commands that do not need an ACK + sql = """ + DELETE FROM {0} + WHERE (ack_requested=0 AND processed=1) + """.format(commandTableName) + self.execute(sql) + self.commit() + return True + except Exception: + self.rollback() + core_utils.dump_error_message(tmpLog) + return False + + # get workers to kill + def get_workers_to_kill(self, max_workers, check_interval): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_kill') + tmpLog.debug('start') + # sql to get worker IDs + sqlW = "SELECT workerID,status,configID FROM {0} ".format(workTableName) + sqlW += "WHERE killTime IS NOT NULL AND killTime<:checkTimeLimit " + sqlW += "ORDER BY killTime LIMIT {0} ".format(max_workers) + # sql to lock or release worker + sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName) + sqlL += "WHERE workerID=:workerID " + sqlL += "AND killTime IS NOT NULL AND killTime<:checkTimeLimit " + # sql to get workers + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE workerID=:workerID " + timeNow = datetime.datetime.utcnow() + timeLimit = timeNow - 
datetime.timedelta(seconds=check_interval) + # get workerIDs + varMap = dict() + varMap[':checkTimeLimit'] = timeLimit + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + retVal = dict() + for workerID, workerStatus, configID in resW: + # ignore configID + if not core_utils.dynamic_plugin_change(): + configID = None + # lock or release worker + varMap = dict() + varMap[':workerID'] = workerID + varMap[':checkTimeLimit'] = timeLimit + if workerStatus in (WorkSpec.ST_cancelled, WorkSpec.ST_failed, WorkSpec.ST_finished): + # release + varMap[':setTime'] = None + else: + # lock + varMap[':setTime'] = timeNow + self.execute(sqlL, varMap) + # get worker + nRow = self.cur.rowcount + if nRow == 1 and varMap[':setTime'] is not None: + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlG, varMap) + resG = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(resG) + queueName = workSpec.computingSite + retVal.setdefault(queueName, dict()) + retVal[queueName].setdefault(configID, []) + retVal[queueName][configID].append(workSpec) + # commit + self.commit() + tmpLog.debug('got {0} workers'.format(len(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get worker stats + def get_worker_stats(self, site_name): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_worker_stats') + tmpLog.debug('start') + # sql to get nQueueLimit + sqlQ = "SELECT queueName,resourceType,nNewWorkers FROM {0} ".format(pandaQueueTableName) + sqlQ += "WHERE siteName=:siteName " + # get nQueueLimit + varMap = dict() + varMap[':siteName'] = site_name + self.execute(sqlQ, varMap) + resQ = self.cur.fetchall() + retMap = dict() + for computingSite, resourceType, nNewWorkers in resQ: + if resourceType not in retMap: + retMap[resourceType] = { + 'running': 0, + 'submitted': 0, + 'to_submit': nNewWorkers + } + # get worker stats + sqlW = "SELECT wt.status, wt.computingSite, pq.resourceType, COUNT(*) cnt " + sqlW += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName) + sqlW += "WHERE pq.siteName=:siteName AND wt.computingSite=pq.queueName AND wt.status IN (:st1,:st2) " + sqlW += "GROUP BY wt.status, wt.computingSite, pq.resourceType " + # get worker stats + varMap = dict() + varMap[':siteName'] = site_name + varMap[':st1'] = 'running' + varMap[':st2'] = 'submitted' + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + for workerStatus, computingSite, resourceType, cnt in resW: + if resourceType not in retMap: + retMap[resourceType] = { + 'running': 0, + 'submitted': 0, + 'to_submit': 0 + } + retMap[resourceType][workerStatus] = cnt + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retMap))) + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get worker stats + def get_worker_stats_bulk(self, active_ups_queues): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_worker_stats_bulk') + tmpLog.debug('start') + # sql to get nQueueLimit + sqlQ = "SELECT queueName, resourceType, nNewWorkers FROM {0} ".format(pandaQueueTableName) + + # get nQueueLimit + self.execute(sqlQ) + resQ = self.cur.fetchall() + retMap = dict() + for computingSite, resourceType, nNewWorkers in resQ: + retMap.setdefault(computingSite, {}) + if resourceType and resourceType != 'ANY' and resourceType not in retMap[computingSite]: + 
retMap[computingSite][resourceType] = {'running': 0, 'submitted': 0, 'to_submit': nNewWorkers} + + # get worker stats + sqlW = "SELECT wt.status, wt.computingSite, wt.resourceType, COUNT(*) cnt " + sqlW += "FROM {0} wt ".format(workTableName) + sqlW += "WHERE wt.status IN (:st1,:st2) " + sqlW += "GROUP BY wt.status,wt.computingSite, wt.resourceType " + # get worker stats + varMap = dict() + varMap[':st1'] = 'running' + varMap[':st2'] = 'submitted' + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + for workerStatus, computingSite, resourceType, cnt in resW: + if resourceType and resourceType != 'ANY': + retMap.setdefault(computingSite, {}) + retMap[computingSite].setdefault(resourceType, {'running': 0, 'submitted': 0, 'to_submit': 0}) + retMap[computingSite][resourceType][workerStatus] = cnt + + # if there are no jobs for an active UPS queue, it needs to be initialized so that the pilot streaming + # on panda server starts processing the queue + if active_ups_queues: + for ups_queue in active_ups_queues: + if ups_queue not in retMap or not retMap[ups_queue]: + retMap[ups_queue] = {'SCORE': {'running': 0, 'submitted': 0, 'to_submit': 0}} + + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retMap))) + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # send kill command to workers associated to a job + def kill_workers_with_job(self, panda_id): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), + method_name='kill_workers_with_job') + tmpLog.debug('start') + # sql to set killTime + sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName) + sqlL += "WHERE workerID=:workerID AND killTime IS NULL AND NOT status IN (:st1,:st2,:st3) " + # sql to get associated workers + sqlA = "SELECT workerID FROM {0} ".format(jobWorkerTableName) + sqlA += "WHERE PandaID=:pandaID " + # set an older time to trigger sweeper + setTime = datetime.datetime.utcnow() - datetime.timedelta(hours=6) + # get workers + varMap = dict() + varMap[':pandaID'] = panda_id + self.execute(sqlA, varMap) + resA = self.cur.fetchall() + nRow = 0 + for workerID, in resA: + # set killTime + varMap = dict() + varMap[':workerID'] = workerID + varMap[':setTime'] = setTime + varMap[':st1'] = WorkSpec.ST_finished + varMap[':st2'] = WorkSpec.ST_failed + varMap[':st3'] = WorkSpec.ST_cancelled + self.execute(sqlL, varMap) + nRow += self.cur.rowcount + # commit + self.commit() + tmpLog.debug('set killTime to {0} workers'.format(nRow)) + return nRow + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # send kill command to a worker + def kill_worker(self, worker_id): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id), + method_name='kill_worker') + tmpLog.debug('start') + # sql to set killTime + sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName) + sqlL += "WHERE workerID=:workerID AND killTime IS NULL AND NOT status IN (:st1,:st2,:st3) " + # set an older time to trigger sweeper + setTime = datetime.datetime.utcnow() - datetime.timedelta(hours=6) + # set killTime + varMap = dict() + varMap[':workerID'] = worker_id + varMap[':setTime'] = setTime + varMap[':st1'] = WorkSpec.ST_finished + varMap[':st2'] = WorkSpec.ST_failed + varMap[':st3'] = WorkSpec.ST_cancelled + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + # commit + 
self.commit() + tmpLog.debug('set killTime with {0}'.format(nRow)) + return nRow + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # get workers for cleanup + def get_workers_for_cleanup(self, max_workers, status_timeout_map): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_workers_for_cleanup') + tmpLog.debug('start') + # sql to get worker IDs + timeNow = datetime.datetime.utcnow() + modTimeLimit = timeNow - datetime.timedelta(minutes=60) + varMap = dict() + varMap[':timeLimit'] = modTimeLimit + sqlW = "SELECT workerID, configID FROM {0} ".format(workTableName) + sqlW += "WHERE lastUpdate IS NULL AND (" + for tmpStatus, tmpTimeout in iteritems(status_timeout_map): + tmpStatusKey = ':status_{0}'.format(tmpStatus) + tmpTimeoutKey = ':timeLimit_{0}'.format(tmpStatus) + sqlW += '(status={0} AND endTime<={1}) OR '.format(tmpStatusKey, tmpTimeoutKey) + varMap[tmpStatusKey] = tmpStatus + varMap[tmpTimeoutKey] = timeNow - datetime.timedelta(hours=tmpTimeout) + sqlW = sqlW[:-4] + sqlW += ') ' + sqlW += 'AND modificationTime<:timeLimit ' + sqlW += "ORDER BY modificationTime LIMIT {0} ".format(max_workers) + # sql to lock or release worker + sqlL = "UPDATE {0} SET modificationTime=:setTime ".format(workTableName) + sqlL += "WHERE workerID=:workerID AND modificationTime<:timeLimit " + # sql to check associated jobs + sqlA = "SELECT COUNT(*) cnt FROM {0} j, {1} r ".format(jobTableName, jobWorkerTableName) + sqlA += "WHERE j.PandaID=r.PandaID AND r.workerID=:workerID " + sqlA += "AND propagatorTime IS NOT NULL " + # sql to get workers + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlG += "WHERE workerID=:workerID " + # sql to get PandaIDs + sqlP = "SELECT j.PandaID FROM {0} j, {1} r ".format(jobTableName, jobWorkerTableName) + sqlP += "WHERE j.PandaID=r.PandaID AND r.workerID=:workerID " + # sql to get jobs + sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName) + sqlJ += "WHERE PandaID=:PandaID " + # sql to get files + sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlF += "WHERE PandaID=:PandaID " + # sql to get files not to be deleted. 
b.todelete is not put in the WHERE clause so that the index on b.lfn is used + sqlD = "SELECT b.lfn,b.todelete FROM {0} a, {0} b ".format(fileTableName) + sqlD += "WHERE a.PandaID=:PandaID AND a.fileType=:fileType AND b.lfn=a.lfn " + # get workerIDs + timeNow = datetime.datetime.utcnow() + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + retVal = dict() + iWorkers = 0 + for workerID, configID in resW: + # lock worker + varMap = dict() + varMap[':workerID'] = workerID + varMap[':setTime'] = timeNow + varMap[':timeLimit'] = modTimeLimit + self.execute(sqlL, varMap) + # commit + self.commit() + if self.cur.rowcount == 0: + continue + # ignore configID + if not core_utils.dynamic_plugin_change(): + configID = None + # check associated jobs + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlA, varMap) + nActJobs, = self.cur.fetchone() + # cleanup when there is no active job + if nActJobs == 0: + # get worker + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlG, varMap) + resG = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(resG) + queueName = workSpec.computingSite + retVal.setdefault(queueName, dict()) + retVal[queueName].setdefault(configID, []) + retVal[queueName][configID].append(workSpec) + # get jobs + jobSpecs = [] + checkedLFNs = set() + keepLFNs = set() + varMap = dict() + varMap[':workerID'] = workerID + self.execute(sqlP, varMap) + resP = self.cur.fetchall() + for pandaID, in resP: + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlJ, varMap) + resJ = self.cur.fetchone() + jobSpec = JobSpec() + jobSpec.pack(resJ) + jobSpecs.append(jobSpec) + # get LFNs not to be deleted + varMap = dict() + varMap[':PandaID'] = pandaID + varMap[':fileType'] = 'input' + self.execute(sqlD, varMap) + resDs = self.cur.fetchall() + for tmpLFN, tmpTodelete in resDs: + if tmpTodelete == 0: + keepLFNs.add(tmpLFN) + # get files to be deleted + varMap = dict() + varMap[':PandaID'] = jobSpec.PandaID + self.execute(sqlF, varMap) + resFs = self.cur.fetchall() + for resF in resFs: + fileSpec = FileSpec() + fileSpec.pack(resF) + # skip if already checked + if fileSpec.lfn in checkedLFNs: + continue + checkedLFNs.add(fileSpec.lfn) + # check if it is ready to delete + if fileSpec.lfn not in keepLFNs: + jobSpec.add_file(fileSpec) + workSpec.set_jobspec_list(jobSpecs) + iWorkers += 1 + tmpLog.debug('got {0} workers'.format(iWorkers)) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # delete a worker + def delete_worker(self, worker_id): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id), + method_name='delete_worker') + tmpLog.debug('start') + # sql to get jobs + sqlJ = "SELECT PandaID FROM {0} ".format(jobWorkerTableName) + sqlJ += "WHERE workerID=:workerID " + # sql to delete job + sqlDJ = "DELETE FROM {0} ".format(jobTableName) + sqlDJ += "WHERE PandaID=:PandaID " + # sql to delete files + sqlDF = "DELETE FROM {0} ".format(fileTableName) + sqlDF += "WHERE PandaID=:PandaID " + # sql to delete events + sqlDE = "DELETE FROM {0} ".format(eventTableName) + sqlDE += "WHERE PandaID=:PandaID " + # sql to delete relations + sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName) + sqlDR += "WHERE PandaID=:PandaID " + # sql to delete worker + sqlDW = "DELETE FROM {0} ".format(workTableName) + sqlDW += "WHERE workerID=:workerID " + # get jobs + varMap = dict() + varMap[':workerID'] = worker_id + self.execute(sqlJ, varMap) + resJ = 
self.cur.fetchall() + for pandaID, in resJ: + varMap = dict() + varMap[':PandaID'] = pandaID + # delete job + self.execute(sqlDJ, varMap) + # delete files + self.execute(sqlDF, varMap) + # delete events + self.execute(sqlDE, varMap) + # delete relations + self.execute(sqlDR, varMap) + # delete worker + varMap = dict() + varMap[':workerID'] = worker_id + self.execute(sqlDW, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # release jobs + def release_jobs(self, panda_ids, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='release_jobs') + tmpLog.debug('start for {0} jobs'.format(len(panda_ids))) + # sql to release job + sql = "UPDATE {0} SET lockedBy=NULL ".format(jobTableName) + sql += "WHERE PandaID=:pandaID AND lockedBy=:lockedBy " + nJobs = 0 + for pandaID in panda_ids: + varMap = dict() + varMap[':pandaID'] = pandaID + varMap[':lockedBy'] = locked_by + self.execute(sql, varMap) + if self.cur.rowcount > 0: + nJobs += 1 + # commit + self.commit() + tmpLog.debug('released {0} jobs'.format(nJobs)) + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # clone queue + def clone_queue_with_new_resource_type(self, site_name, queue_name, resource_type, new_workers): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'site_name={0} queue_name={1}'.format(site_name, queue_name), + method_name='clone_queue_with_new_resource_type') + tmpLog.debug('start') + + # get the values from one of the existing queues + sql_select_queue = "SELECT {0} FROM {1} ".format(PandaQueueSpec.column_names(), pandaQueueTableName) + sql_select_queue += "WHERE siteName=:siteName " + var_map = dict() + var_map[':siteName'] = site_name + self.execute(sql_select_queue, var_map) + queue = self.cur.fetchone() + + if queue: # a queue to clone was found + var_map = {} + attribute_list = [] + attr_binding_list = [] + for attribute, value in zip(PandaQueueSpec.column_names().split(','), queue): + attr_binding = ':{0}'.format(attribute) + if attribute == 'resourceType': + var_map[attr_binding] = resource_type + elif attribute == 'nNewWorkers': + var_map[attr_binding] = new_workers + elif attribute == 'uniqueName': + var_map[attr_binding] = core_utils.get_unique_queue_name(queue_name, resource_type) + else: + var_map[attr_binding] = value + attribute_list.append(attribute) + attr_binding_list.append(attr_binding) + sql_insert = "INSERT IGNORE INTO {0} ({1}) ".format(pandaQueueTableName, ','.join(attribute_list)) + sql_values = "VALUES ({0}) ".format(','.join(attr_binding_list)) + + self.execute(sql_insert + sql_values, var_map) + else: + tmpLog.debug("Failed to clone the queue") + self.commit() + return True + except Exception: + self.rollback() + core_utils.dump_error_message(_logger) + return False + + # set queue limit + def set_queue_limit(self, site_name, params): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'siteName={0}'.format(site_name), method_name='set_queue_limit') + tmpLog.debug('start') + + # sql to reset queue limits before setting new command to avoid old values being repeated again and again + sql_reset = "UPDATE {0} ".format(pandaQueueTableName) + sql_reset += "SET nNewWorkers=:zero WHERE siteName=:siteName " + + # sql to get resource types + sql_get_resource = "SELECT 
resourceType FROM {0} ".format(pandaQueueTableName) + sql_get_resource += "WHERE siteName=:siteName " + sql_get_resource += "FOR UPDATE " + + # sql to update nQueueLimit + sql_update_queue = "UPDATE {0} ".format(pandaQueueTableName) + sql_update_queue += "SET nNewWorkers=:nQueue WHERE siteName=:siteName AND resourceType=:resourceType " + + # sql to get num of submitted workers + sql_count_workers = "SELECT COUNT(*) cnt " + sql_count_workers += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName) + sql_count_workers += "WHERE pq.siteName=:siteName AND wt.computingSite=pq.queueName AND wt.status=:status " + sql_count_workers += "AND pq.resourceType=:resourceType " + + # reset nqueued for all resource types + varMap = dict() + varMap[':zero'] = 0 + varMap[':siteName'] = site_name + self.execute(sql_reset, varMap) + + # get resource types + varMap = dict() + varMap[':siteName'] = site_name + self.execute(sql_get_resource, varMap) + resRes = self.cur.fetchall() + resource_type_list = set() + for tmpRes, in resRes: + resource_type_list.add(tmpRes) + + # set all queues + nUp = 0 + retMap = dict() + queue_name = site_name + + for resource_type, value in iteritems(params): + tmpLog.debug('Processing rt {0} -> {1}'.format(resource_type, value)) + + # get num of submitted workers + varMap = dict() + varMap[':siteName'] = site_name + varMap[':resourceType'] = resource_type + varMap[':status'] = 'submitted' + self.execute(sql_count_workers, varMap) + res = self.cur.fetchone() + tmpLog.debug('{0} has {1} submitted workers'.format(resource_type, res)) + nSubmittedWorkers = 0 + if res is not None: + nSubmittedWorkers, = res + + # set new value + # value = max(value - nSubmittedWorkers, 0) + if value is None: + value = 0 + varMap = dict() + varMap[':nQueue'] = value + varMap[':siteName'] = site_name + varMap[':resourceType'] = resource_type + self.execute(sql_update_queue, varMap) + iUp = self.cur.rowcount + + # iUp is 0 when nQueue is not changed + if iUp > 0 or resource_type in resource_type_list: + # a queue was updated, add the values to the map + retMap[resource_type] = value + else: + # no queue was updated, we need to create a new one for the resource type + cloned = self.clone_queue_with_new_resource_type(site_name, queue_name, resource_type, value) + if cloned: + retMap[resource_type] = value + iUp = 1 + + nUp += iUp + tmpLog.debug('set nNewWorkers={0} to {1}:{2} with {3}'.format(value, queue_name, resource_type, iUp)) + + # commit + self.commit() + tmpLog.debug('updated {0} queues'.format(nUp)) + + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get the number of missed workers + def get_num_missed_workers(self, queue_name, criteria): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, "queue={0}".format(queue_name), + method_name='get_num_missed_workers') + tmpLog.debug('start') + # get worker stats + sqlW = "SELECT COUNT(*) cnt " + sqlW += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName) + sqlW += "WHERE wt.computingSite=pq.queueName AND wt.status=:status " + # get worker stats + varMap = dict() + for attr, val in iteritems(criteria): + if attr == 'timeLimit': + sqlW += "AND wt.submitTime>:timeLimit " + varMap[':timeLimit'] = val + elif attr in ['siteName']: + sqlW += "AND pq.{0}=:{0} ".format(attr) + varMap[':{0}'.format(attr)] = val + elif attr in ['computingSite', 'computingElement']: + sqlW += "AND wt.{0}=:{0} ".format(attr) + 
varMap[':{0}'.format(attr)] = val + varMap[':status'] = 'missed' + self.execute(sqlW, varMap) + resW = self.cur.fetchone() + if resW is None: + nMissed = 0 + else: + nMissed, = resW + # commit + self.commit() + tmpLog.debug('got nMissed={0} for {1}'.format(nMissed, str(criteria))) + return nMissed + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return 0 + + # get a worker + def get_workers_with_job_id(self, panda_id, use_commit=True): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(panda_id), + method_name='get_workers_with_job_id') + tmpLog.debug('start') + # sql to get workerIDs + sqlW = "SELECT workerID FROM {0} WHERE PandaID=:PandaID ".format(jobWorkerTableName) + sqlW += "ORDER BY workerID " + # sql to get a worker + sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(slim=True), workTableName) + sqlG += "WHERE workerID=:workerID " + # get workerIDs + varMap = dict() + varMap[':PandaID'] = panda_id + self.execute(sqlW, varMap) + retList = [] + for worker_id, in self.cur.fetchall(): + # get a worker + varMap = dict() + varMap[':workerID'] = worker_id + self.execute(sqlG, varMap) + res = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(res, slim=True) + retList.append(workSpec) + # commit + if use_commit: + self.commit() + tmpLog.debug('got {0} workers'.format(len(retList))) + return retList + except Exception: + # roll back + if use_commit: + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # delete old process locks + def clean_process_locks(self): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='clean_process_locks') + tmpLog.debug('start') + # delete locks + sqlW = "DELETE FROM {0} ".format(processLockTableName) + # get worker stats + self.execute(sqlW) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get a process lock + def get_process_lock(self, process_name, locked_by, lock_interval): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, "proc={0} by={1}".format(process_name, locked_by), + method_name='get_process_lock') + tmpLog.debug('start') + # delete old lock + sqlD = "DELETE FROM {0} ".format(processLockTableName) + sqlD += "WHERE lockTime<:timeLimit " + varMap = dict() + varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(hours=6) + self.execute(sqlD, varMap) + # commit + self.commit() + # check lock + sqlC = "SELECT lockTime FROM {0} ".format(processLockTableName) + sqlC += "WHERE processName=:processName " + varMap = dict() + varMap[':processName'] = process_name + self.execute(sqlC, varMap) + resC = self.cur.fetchone() + retVal = False + timeNow = datetime.datetime.utcnow() + if resC is None: + # insert lock if missing + sqlI = "INSERT INTO {0} ({1}) ".format(processLockTableName, ProcessLockSpec.column_names()) + sqlI += ProcessLockSpec.bind_values_expression() + processLockSpec = ProcessLockSpec() + processLockSpec.processName = process_name + processLockSpec.lockedBy = locked_by + processLockSpec.lockTime = timeNow + varMap = processLockSpec.values_list() + self.execute(sqlI, varMap) + retVal = True + else: + oldLockTime, = resC + timeLimit = timeNow - datetime.timedelta(seconds=lock_interval) + if oldLockTime <= timeLimit: + # update lock if old + sqlU = "UPDATE {0} SET 
lockedBy=:lockedBy,lockTime=:timeNow ".format(processLockTableName) + sqlU += "WHERE processName=:processName AND lockTime<=:timeLimit " + varMap = dict() + varMap[':processName'] = process_name + varMap[':lockedBy'] = locked_by + varMap[':timeLimit'] = timeLimit + varMap[':timeNow'] = timeNow + self.execute(sqlU, varMap) + if self.cur.rowcount > 0: + retVal = True + # commit + self.commit() + tmpLog.debug('done with {0}'.format(retVal)) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # release a process lock + def release_process_lock(self, process_name, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, "proc={0} by={1}".format(process_name, locked_by), + method_name='release_process_lock') + tmpLog.debug('start') + # delete old lock + sqlC = "DELETE FROM {0} ".format(processLockTableName) + sqlC += "WHERE processName=:processName AND lockedBy=:lockedBy " + varMap = dict() + varMap[':processName'] = process_name + varMap[':lockedBy'] = locked_by + self.execute(sqlC, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get file status + def get_file_status(self, lfn, file_type, endpoint, job_status): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'lfn={0} endpoint={1}'.format(lfn, endpoint), + method_name='get_file_status') + tmpLog.debug('start') + # sql to get files + sqlF = "SELECT f.status, COUNT(*) cnt FROM {0} f, {1} j ".format(fileTableName, jobTableName) + sqlF += "WHERE j.PandaID=f.PandaID AND j.status=:jobStatus " + sqlF += "AND f.lfn=:lfn AND f.fileType=:type " + if endpoint is not None: + sqlF += "AND f.endpoint=:endpoint " + sqlF += "GROUP BY f.status " + # get files + varMap = dict() + varMap[':lfn'] = lfn + varMap[':type'] = file_type + varMap[':jobStatus'] = job_status + if endpoint is not None: + varMap[':endpoint'] = endpoint + self.execute(sqlF, varMap) + retMap = dict() + for status, cnt in self.cur.fetchall(): + retMap[status] = cnt + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retMap))) + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # change file status + def change_file_status(self, panda_id, data, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='change_file_status') + tmpLog.debug('start lockedBy={0}'.format(locked_by)) + # sql to check lock of job + sqlJ = "SELECT lockedBy FROM {0} ".format(jobTableName) + sqlJ += "WHERE PandaID=:PandaID FOR UPDATE " + # sql to update files + sqlF = "UPDATE {0} ".format(fileTableName) + sqlF += "SET status=:status WHERE fileID=:fileID " + # check lock + varMap = dict() + varMap[':PandaID'] = panda_id + self.execute(sqlJ, varMap) + resJ = self.cur.fetchone() + if resJ is None: + tmpLog.debug('skip since job not found') + else: + lockedBy, = resJ + if lockedBy != locked_by: + tmpLog.debug('skip since lockedBy is inconsistent in DB {0}'.format(lockedBy)) + else: + # update files + for tmpFileID, tmpLFN, newStatus in data: + varMap = dict() + varMap[':fileID'] = tmpFileID + varMap[':status'] = newStatus + self.execute(sqlF, varMap) + tmpLog.debug('set new status {0} to {1}'.format(newStatus, tmpLFN)) + # commit + self.commit() + 
tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get group for a file + def get_group_for_file(self, lfn, file_type, endpoint): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'lfn={0} endpoint={1}'.format(lfn, endpoint), + method_name='get_group_for_file') + tmpLog.debug('start') + # sql to get group with the latest update + sqlF = "SELECT * FROM (" + sqlF += "SELECT groupID,groupStatus,groupUpdateTime FROM {0} ".format(fileTableName) + sqlF += "WHERE lfn=:lfn AND fileType=:type " + sqlF += "AND groupID IS NOT NULL AND groupStatus<>:ngStatus " + if endpoint is not None: + sqlF += "AND endpoint=:endpoint " + sqlF += "ORDER BY groupUpdateTime DESC " + sqlF += ") AS TMP LIMIT 1 " + # get group + varMap = dict() + varMap[':lfn'] = lfn + varMap[':type'] = file_type + varMap[':ngStatus'] = 'failed' + if endpoint is not None: + varMap[':endpoint'] = endpoint + self.execute(sqlF, varMap) + resF = self.cur.fetchone() + if resF is None: + retVal = None + else: + groupID, groupStatus, groupUpdateTime = resF + retVal = {'groupID': groupID, 'groupStatus': groupStatus, 'groupUpdateTime': groupUpdateTime} + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return None + + # get files with a group ID + def get_files_with_group_id(self, group_id): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id), + method_name='get_files_with_group_id') + tmpLog.debug('start') + # sql to get files + sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName) + sqlF += "WHERE groupID=:groupID " + # get files + varMap = dict() + varMap[':groupID'] = group_id + retList = [] + self.execute(sqlF, varMap) + for resFile in self.cur.fetchall(): + fileSpec = FileSpec() + fileSpec.pack(resFile) + retList.append(fileSpec) + # commit + self.commit() + tmpLog.debug('got {0} files'.format(len(retList))) + return retList + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # update group status + def update_file_group_status(self, group_id, status_string): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id), + method_name='update_file_group_status') + tmpLog.debug('start') + # sql to get files + sqlF = "UPDATE {0} set groupStatus=:groupStatus ".format(fileTableName) + sqlF += "WHERE groupID=:groupID " + # get files + varMap = dict() + varMap[':groupID'] = group_id + varMap[':groupStatus'] = status_string + self.execute(sqlF, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + tmpLog.debug('updated {0} files'.format(nRow)) + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get file group status + def get_file_group_status(self, group_id): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id), + method_name='get_file_group_status') + tmpLog.debug('start') + # sql to get files + sqlF = "SELECT DISTINCT groupStatus FROM {0} ".format(fileTableName) + sqlF += "WHERE groupID=:groupID " + # get files + varMap = dict() + varMap[':groupID'] = group_id + self.execute(sqlF, varMap) + res = self.cur.fetchall() + 
retVal = set() + for groupStatus, in res: + retVal.add(groupStatus) + # commit + self.commit() + tmpLog.debug('get {0}'.format(str(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # lock job again + def lock_job_again(self, panda_id, time_column, lock_column, locked_by): + try: + tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='lock_job_again') + tmpLog.debug('start column={0} id={1}'.format(lock_column, locked_by)) + # check lock + sqlC = "SELECT {0},{1} FROM {2} ".format(lock_column, time_column, jobTableName) + sqlC += "WHERE PandaID=:pandaID " + sqlC += "FOR UPDATE " + varMap = dict() + varMap[':pandaID'] = panda_id + self.execute(sqlC, varMap) + resC = self.cur.fetchone() + if resC is None: + retVal = False + tmpLog.debug('not found') + else: + oldLockedBy, oldLockedTime = resC + if oldLockedBy != locked_by: + tmpLog.debug('locked by another {0} at {1}'.format(oldLockedBy, oldLockedTime)) + retVal = False + else: + # update locked time + sqlU = "UPDATE {0} SET {1}=:timeNow WHERE pandaID=:pandaID ".format(jobTableName, time_column) + varMap = dict() + varMap[':pandaID'] = panda_id + varMap[':timeNow'] = datetime.datetime.utcnow() + self.execute(sqlU, varMap) + retVal = True + # commit + self.commit() + tmpLog.debug('done with {0}'.format(retVal)) + # return + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # set file group + def set_file_group(self, file_specs, group_id, status_string): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id), + method_name='set_file_group') + tmpLog.debug('start') + timeNow = datetime.datetime.utcnow() + # sql to update files + sqlF = "UPDATE {0} ".format(fileTableName) + sqlF += "SET groupID=:groupID,groupStatus=:groupStatus,groupUpdateTime=:groupUpdateTime " + sqlF += "WHERE lfn=:lfn " + # update files + for fileSpec in file_specs: + varMap = dict() + varMap[':groupID'] = group_id + varMap[':groupStatus'] = status_string + varMap[':groupUpdateTime'] = timeNow + varMap[':lfn'] = fileSpec.lfn + self.execute(sqlF, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # refresh file group info + def refresh_file_group_info(self, job_spec): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(job_spec.PandaID), + method_name='refresh_file_group_info') + tmpLog.debug('start') + # sql to get info + sqlF = "SELECT groupID,groupStatus,groupUpdateTime FROM {0} ".format(fileTableName) + sqlF += "WHERE lfn=:lfn " + # get info + for fileSpec in job_spec.inFiles.union(job_spec.outFiles): + varMap = dict() + varMap[':lfn'] = fileSpec.lfn + self.execute(sqlF, varMap) + resF = self.cur.fetchone() + if resF is None: + continue + groupID, groupStatus, groupUpdateTime = resF + fileSpec.groupID = groupID + fileSpec.groupStatus = groupStatus + fileSpec.groupUpdateTime = groupUpdateTime + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # increment submission attempt + def increment_submission_attempt(self, panda_id, new_number): + try: + # get logger 
+ tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(panda_id), + method_name='increment_submission_attempt') + tmpLog.debug('start with newNum={0}'.format(new_number)) + # sql to update attempt number + sqlL = "UPDATE {0} SET submissionAttempts=:newNum ".format(jobTableName) + sqlL += "WHERE PandaID=:PandaID " + varMap = dict() + varMap[':PandaID'] = panda_id + varMap[':newNum'] = new_number + self.execute(sqlL, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get queue status + def get_worker_limits(self, site_name): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_worker_limits') + tmpLog.debug('start') + # sql to get + sqlQ = "SELECT maxWorkers,nQueueLimitWorker,nQueueLimitWorkerRatio," + sqlQ += "nQueueLimitWorkerMax,nQueueLimitWorkerMin FROM {0} ".format(pandaQueueTableName) + sqlQ += "WHERE siteName=:siteName AND resourceType='ANY'" + # sql to count resource types + sqlNT = "SELECT COUNT(*) cnt FROM {0} ".format(pandaQueueTableName) + sqlNT += "WHERE siteName=:siteName AND resourceType!='ANY'" + # sql to count running workers + sqlNR = "SELECT COUNT(*) cnt FROM {0} ".format(workTableName) + sqlNR += "WHERE computingSite=:computingSite AND status IN (:status1)" + # get + varMap = dict() + varMap[':siteName'] = site_name + self.execute(sqlQ, varMap) + resQ = self.cur.fetchall() + # count resource types + varMap = dict() + varMap[':computingSite'] = site_name + varMap[':siteName'] = site_name + self.execute(sqlNT, varMap) + resNT = self.cur.fetchall() + # count running workers + varMap = dict() + varMap[':computingSite'] = site_name + varMap[':status1'] = 'running' + self.execute(sqlNR, varMap) + resNR = self.cur.fetchall() + # dynamic nQueueLimitWorker + retMap = dict() + nRunning = 0 + nRT = 1 + for cnt, in resNR: + nRunning = cnt + for cnt, in resNT: + nRT = max(nRT, cnt) + for maxWorkers, nQueueLimitWorker_orig, nQueueLimitWorkerRatio, \ + nQueueLimitWorkerMax, nQueueLimitWorkerMin_orig in resQ: + if nQueueLimitWorkerRatio is not None and nQueueLimitWorkerRatio > 0: + nQueueLimitWorkerByRatio = int(nRunning * nQueueLimitWorkerRatio / 100) + nQueueLimitWorkerMin = 1 + if nQueueLimitWorkerMin_orig is not None: + nQueueLimitWorkerMin = nQueueLimitWorkerMin_orig + nQueueLimitWorkerMinAllRTs = nQueueLimitWorkerMin * nRT + nQueueLimitWorker = max(nQueueLimitWorkerByRatio, nQueueLimitWorkerMinAllRTs) + nQueueLimitWorkerPerRT = max(nQueueLimitWorkerByRatio, nQueueLimitWorkerMin) + if nQueueLimitWorkerMax is not None: + nQueueLimitWorker = min(nQueueLimitWorker, nQueueLimitWorkerMax) + nQueueLimitWorkerPerRT = min(nQueueLimitWorkerPerRT, nQueueLimitWorkerMax) + elif nQueueLimitWorker_orig is not None: + nQueueLimitWorker = nQueueLimitWorker_orig + nQueueLimitWorkerPerRT = nQueueLimitWorker + else: + nQueueLimitWorker = maxWorkers + nQueueLimitWorkerPerRT = nQueueLimitWorker + nQueueLimitWorker = min(nQueueLimitWorker, maxWorkers) + retMap.update({ + 'maxWorkers': maxWorkers, + 'nQueueLimitWorker': nQueueLimitWorker, + 'nQueueLimitWorkerPerRT': nQueueLimitWorkerPerRT, + }) + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retMap))) + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get worker CE stats + def get_worker_ce_stats(self, site_name): + try: + # get logger + tmpLog 
= core_utils.make_logger(_logger, method_name='get_worker_ce_stats') + tmpLog.debug('start') + # get worker CE stats + sqlW = "SELECT wt.status,wt.computingSite,wt.computingElement,COUNT(*) cnt " + sqlW += "FROM {0} wt ".format(workTableName) + sqlW += "WHERE wt.computingSite=:siteName AND wt.status IN (:st1,:st2) " + sqlW += "GROUP BY wt.status,wt.computingElement " + # get worker CE stats + varMap = dict() + varMap[':siteName'] = site_name + varMap[':st1'] = 'running' + varMap[':st2'] = 'submitted' + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + retMap = dict() + for workerStatus, computingSite, computingElement, cnt in resW: + if computingElement not in retMap: + retMap[computingElement] = { + 'running': 0, + 'submitted': 0, + } + retMap[computingElement][workerStatus] = cnt + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(retMap))) + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # get worker CE backend throughput + def get_worker_ce_backend_throughput(self, site_name, time_window): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_worker_ce_backend_throughput') + tmpLog.debug('start') + # get worker CE throughput + sqlW = "SELECT wt.computingElement,wt.status,COUNT(*) cnt " + sqlW += "FROM {0} wt ".format(workTableName) + sqlW += "WHERE wt.computingSite=:siteName " + sqlW += "AND wt.status IN (:st1,:st2,:st3) " + sqlW += "AND wt.creationtime < :timeWindowMiddle " + sqlW += "AND (wt.starttime is NULL OR " + sqlW += "(wt.starttime >= :timeWindowStart AND wt.starttime < :timeWindowEnd) ) " + sqlW += "GROUP BY wt.status,wt.computingElement " + # time window start and end + timeWindowEnd = datetime.datetime.utcnow() + timeWindowStart = timeWindowEnd - datetime.timedelta(seconds=time_window) + timeWindowMiddle = timeWindowEnd - datetime.timedelta(seconds=time_window/2) + # get worker CE throughput + varMap = dict() + varMap[':siteName'] = site_name + varMap[':st1'] = 'submitted' + varMap[':st2'] = 'running' + varMap[':st3'] = 'finished' + varMap[':timeWindowStart'] = timeWindowStart + varMap[':timeWindowEnd'] = timeWindowEnd + varMap[':timeWindowMiddle'] = timeWindowMiddle + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + retMap = dict() + for computingElement, workerStatus, cnt in resW: + if computingElement not in retMap: + retMap[computingElement] = { + 'submitted': 0, + 'running': 0, + 'finished': 0, + } + retMap[computingElement][workerStatus] = cnt + # commit + self.commit() + tmpLog.debug('got {0} with time_window={1} for site {2}'.format( + str(retMap), time_window, site_name)) + return retMap + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # add dialog message + def add_dialog_message(self, message, level, module_name, identifier=None): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='add_dialog_message') + tmpLog.debug('start') + # delete old messages + sqlS = "SELECT diagID FROM {0} ".format(diagTableName) + sqlS += "WHERE creationTime<:timeLimit " + varMap = dict() + varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=60) + self.execute(sqlS, varMap) + resS = self.cur.fetchall() + sqlD = "DELETE FROM {0} ".format(diagTableName) + sqlD += "WHERE diagID=:diagID " + for diagID, in resS: + varMap = dict() + varMap[':diagID'] = diagID + self.execute(sqlD, varMap) + # commit + 
self.commit() + # make spec + diagSpec = DiagSpec() + diagSpec.moduleName = module_name + diagSpec.creationTime = datetime.datetime.utcnow() + diagSpec.messageLevel = level + try: + diagSpec.identifier = identifier[:100] + except Exception: + pass + diagSpec.diagMessage = message[:500] + # insert + sqlI = "INSERT INTO {0} ({1}) ".format(diagTableName, DiagSpec.column_names()) + sqlI += DiagSpec.bind_values_expression() + varMap = diagSpec.values_list() + self.execute(sqlI, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get dialog messages to send + def get_dialog_messages_to_send(self, n_messages, lock_interval): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_dialog_messages_to_send') + tmpLog.debug('start') + # sql to select messages + sqlD = "SELECT diagID FROM {0} ".format(diagTableName) + sqlD += "WHERE (lockTime IS NULL OR lockTime<:timeLimit) " + sqlD += "ORDER BY diagID LIMIT {0} ".format(n_messages) + # sql to lock message + sqlL = "UPDATE {0} SET lockTime=:timeNow ".format(diagTableName) + sqlL += "WHERE diagID=:diagID " + sqlL += "AND (lockTime IS NULL OR lockTime<:timeLimit) " + # sql to get message + sqlM = "SELECT {0} FROM {1} ".format(DiagSpec.column_names(), diagTableName) + sqlM += "WHERE diagID=:diagID " + # select messages + timeLimit = datetime.datetime.utcnow() - datetime.timedelta(seconds=lock_interval) + varMap = dict() + varMap[':timeLimit'] = timeLimit + self.execute(sqlD, varMap) + resD = self.cur.fetchall() + diagList = [] + for diagID, in resD: + # lock + varMap = dict() + varMap[':diagID'] = diagID + varMap[':timeLimit'] = timeLimit + varMap[':timeNow'] = datetime.datetime.utcnow() + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + if nRow == 1: + # get + varMap = dict() + varMap[':diagID'] = diagID + self.execute(sqlM, varMap) + resM = self.cur.fetchone() + # make spec + diagSpec = DiagSpec() + diagSpec.pack(resM) + diagList.append(diagSpec) + # commit + self.commit() + tmpLog.debug('got {0} messages'.format(len(diagList))) + return diagList + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return [] + + # delete dialog messages + def delete_dialog_messages(self, ids): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='delete_dialog_messages') + tmpLog.debug('start') + # sql to delete message + sqlM = "DELETE FROM {0} ".format(diagTableName) + sqlM += "WHERE diagID=:diagID " + for diagID in ids: + # lock + varMap = dict() + varMap[':diagID'] = diagID + self.execute(sqlM, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # delete old jobs + def delete_old_jobs(self, timeout): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'timeout={0}'.format(timeout), + method_name='delete_old_jobs') + tmpLog.debug('start') + # sql to get old jobs to be deleted + sqlGJ = "SELECT PandaID FROM {0} ".format(jobTableName) + sqlGJ += "WHERE subStatus=:subStatus AND propagatorTime IS NULL " + sqlGJ += "AND ((modificationTime IS NOT NULL AND modificationTime<:timeLimit1) " + sqlGJ += "OR (modificationTime IS NULL AND creationTime<:timeLimit2)) " + # sql to delete job + sqlDJ = "DELETE FROM {0} 
".format(jobTableName) + sqlDJ += "WHERE PandaID=:PandaID " + # sql to delete files + sqlDF = "DELETE FROM {0} ".format(fileTableName) + sqlDF += "WHERE PandaID=:PandaID " + # sql to delete events + sqlDE = "DELETE FROM {0} ".format(eventTableName) + sqlDE += "WHERE PandaID=:PandaID " + # sql to delete relations + sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName) + sqlDR += "WHERE PandaID=:PandaID " + # get jobs + varMap = dict() + varMap[':subStatus'] = 'done' + varMap[':timeLimit1'] = datetime.datetime.utcnow() - datetime.timedelta(hours=timeout) + varMap[':timeLimit2'] = datetime.datetime.utcnow() - datetime.timedelta(hours=timeout*2) + self.execute(sqlGJ, varMap) + resGJ = self.cur.fetchall() + nDel = 0 + for pandaID, in resGJ: + varMap = dict() + varMap[':PandaID'] = pandaID + # delete job + self.execute(sqlDJ, varMap) + iDel = self.cur.rowcount + if iDel > 0: + nDel += iDel + # delete files + self.execute(sqlDF, varMap) + # delete events + self.execute(sqlDE, varMap) + # delete relations + self.execute(sqlDR, varMap) + # commit + self.commit() + tmpLog.debug('deleted {0} jobs'.format(nDel)) + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get iterator of active workers to monitor fifo + def get_active_workers(self, n_workers, seconds_ago=0): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_active_workers') + tmpLog.debug('start') + # sql to get workers + sqlW = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName) + sqlW += "WHERE status IN (:st_submitted,:st_running,:st_idle) " + sqlW += "AND modificationTime<:timeLimit " + sqlW += "ORDER BY modificationTime,computingSite LIMIT {0} ".format(n_workers) + varMap = dict() + varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_ago) + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + def _get_workspec_from_record(rec): + workspec = WorkSpec() + workspec.pack(rec) + workspec.pandaid_list = [] + return workspec + retVal = map(_get_workspec_from_record, resW) + tmpLog.debug('got {0} workers'.format(len(resW))) + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # lock workers for specific thread + def lock_workers(self, worker_id_list, lock_interval): + try: + timeNow = datetime.datetime.utcnow() + lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval) + retVal = True + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='lock_worker') + tmpLog.debug('start') + # loop + for worker_id, attrs in iteritems(worker_id_list): + varMap = dict() + varMap[':workerID'] = worker_id + varMap[':timeNow'] = timeNow + varMap[':lockTimeLimit'] = lockTimeLimit + varMap[':st1'] = WorkSpec.ST_cancelled + varMap[':st2'] = WorkSpec.ST_finished + varMap[':st3'] = WorkSpec.ST_failed + varMap[':st4'] = WorkSpec.ST_missed + # extract lockedBy + varMap[':lockedBy'] = attrs['lockedBy'] + if attrs['lockedBy'] is None: + del attrs['lockedBy'] + # sql to lock worker + sqlL = "UPDATE {0} SET modificationTime=:timeNow".format(workTableName) + for attrKey, attrVal in iteritems(attrs): + sqlL += ',{0}=:{0}'.format(attrKey) + varMap[':{0}'.format(attrKey)] = attrVal + sqlL += " WHERE workerID=:workerID AND (lockedBy IS 
NULL " + sqlL += "OR (modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL)) " + sqlL += "AND (status NOT IN (:st1,:st2,:st3,:st4)) " + # lock worker + self.execute(sqlL, varMap) + nRow = self.cur.rowcount + tmpLog.debug('done with {0}'.format(nRow)) + # false if failed to lock + if nRow == 0: + retVal = False + # commit + self.commit() + # return + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get queue config dumps + def get_queue_config_dumps(self): + try: + retVal = dict() + configIDs = set() + # time limit + timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24) + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_queue_config_dumps') + tmpLog.debug('start') + # sql to get used IDs + sqlIJ = "SELECT DISTINCT configID FROM {0} ".format(jobTableName) + self.execute(sqlIJ) + resIJ = self.cur.fetchall() + for tmpID, in resIJ: + configIDs.add(tmpID) + sqlIW = "SELECT DISTINCT configID FROM {0} ".format(workTableName) + self.execute(sqlIW) + resIW = self.cur.fetchall() + for tmpID, in resIW: + configIDs.add(tmpID) + # sql to delete + sqlD = "DELETE FROM {0} WHERE configID=:configID ".format(queueConfigDumpTableName) + # sql to get config + sqlQ = "SELECT {0} FROM {1} ".format(QueueConfigDumpSpec.column_names(), queueConfigDumpTableName) + sqlQ += "FOR UPDATE " + self.execute(sqlQ) + resQs = self.cur.fetchall() + iDump = 0 + iDel = 0 + for resQ in resQs: + dumpSpec = QueueConfigDumpSpec() + dumpSpec.pack(resQ) + # delete if unused and too old + if dumpSpec.configID not in configIDs and dumpSpec.creationTime < timeLimit: + varMap = dict() + varMap[':configID'] = dumpSpec.configID + self.execute(sqlD, varMap) + iDel += 1 + else: + retVal[dumpSpec.dumpUniqueName] = dumpSpec + iDump += 1 + # commit + self.commit() + tmpLog.debug('got {0} dumps and delete {1} dumps'.format(iDump, iDel)) + # return + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return {} + + # add queue config dump + def add_queue_config_dump(self, dump_spec): + try: + # sql to insert a job + sqlJ = "INSERT INTO {0} ({1}) ".format(queueConfigDumpTableName, QueueConfigDumpSpec.column_names()) + sqlJ += QueueConfigDumpSpec.bind_values_expression() + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='add_queue_config_dumps') + tmpLog.debug('start for {0}'.format(dump_spec.dumpUniqueName)) + varMap = dump_spec.values_list() + # insert + self.execute(sqlJ, varMap) + # commit + self.commit() + tmpLog.debug('done') + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return False + + # get configID for queue config dump + def get_config_id_dump(self, dump_spec): + try: + # sql to get configID + sqlJ = "SELECT configID FROM {0} ".format(queueConfigDumpTableName) + sqlJ += "WHERE queueName=:queueName AND dumpUniqueName=:dumpUniqueName " + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_config_id_for_dump') + tmpLog.debug('start for {0}:{1}'.format(dump_spec.queueName, dump_spec.dumpUniqueName)) + # get + varMap = dict() + varMap[':queueName'] = dump_spec.queueName + varMap[':dumpUniqueName'] = dump_spec.dumpUniqueName + self.execute(sqlJ, varMap) + resJ = self.cur.fetchone() + if resJ is not None: + configID, = resJ + else: + configID = None + tmpLog.debug('got 
configID={0}'.format(configID)) + # return + return configID + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return None + + # purge a panda queue + def purge_pq(self, queue_name): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'queueName={0}'.format(queue_name), + method_name='purge_pq') + tmpLog.debug('start') + # sql to get jobs + sqlJ = "SELECT PandaID FROM {0} ".format(jobTableName) + sqlJ += "WHERE computingSite=:computingSite " + # sql to get workers + sqlW = "SELECT workerID FROM {0} ".format(workTableName) + sqlW += "WHERE computingSite=:computingSite " + # sql to get queue configs + sqlQ = "SELECT configID FROM {0} ".format(queueConfigDumpTableName) + sqlQ += "WHERE queueName=:queueName " + # sql to delete job + sqlDJ = "DELETE FROM {0} ".format(jobTableName) + sqlDJ += "WHERE PandaID=:PandaID " + # sql to delete files + sqlDF = "DELETE FROM {0} ".format(fileTableName) + sqlDF += "WHERE PandaID=:PandaID " + # sql to delete events + sqlDE = "DELETE FROM {0} ".format(eventTableName) + sqlDE += "WHERE PandaID=:PandaID " + # sql to delete relations by job + sqlDRJ = "DELETE FROM {0} ".format(jobWorkerTableName) + sqlDRJ += "WHERE PandaID=:PandaID " + # sql to delete worker + sqlDW = "DELETE FROM {0} ".format(workTableName) + sqlDW += "WHERE workerID=:workerID " + # sql to delete relations by worker + sqlDRW = "DELETE FROM {0} ".format(jobWorkerTableName) + sqlDRW += "WHERE workerID=:workerID " + # sql to delete queue config + sqlDQ = "DELETE FROM {0} ".format(queueConfigDumpTableName) + sqlDQ += "WHERE configID=:configID " + # sql to delete panda queue + sqlDP = "DELETE FROM {0} ".format(pandaQueueTableName) + sqlDP += "WHERE queueName=:queueName " + # get jobs + varMap = dict() + varMap[':computingSite'] = queue_name + self.execute(sqlJ, varMap) + resJ = self.cur.fetchall() + for pandaID, in resJ: + varMap = dict() + varMap[':PandaID'] = pandaID + # delete job + self.execute(sqlDJ, varMap) + # delete files + self.execute(sqlDF, varMap) + # delete events + self.execute(sqlDE, varMap) + # delete relations + self.execute(sqlDRJ, varMap) + # get workers + varMap = dict() + varMap[':computingSite'] = queue_name + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + for workerID, in resW: + varMap = dict() + varMap[':workerID'] = workerID + # delete workers + self.execute(sqlDW, varMap) + # delete relations + self.execute(sqlDRW, varMap) + # get queue configs + varMap = dict() + varMap[':queueName'] = queue_name + self.execute(sqlQ, varMap) + resQ = self.cur.fetchall() + for configID, in resQ: + varMap = dict() + varMap[':configID'] = configID + # delete queue configs + self.execute(sqlDQ, varMap) + # delete panda queue + varMap = dict() + varMap[':queueName'] = queue_name + self.execute(sqlDP, varMap) + # commit + self.commit() + tmpLog.debug('done') + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # disable multi workers + def disable_multi_workers(self, panda_id): + tmpLog = None + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), + method_name='disable_multi_workers') + tmpLog.debug('start') + # sql to update flag + sqlJ = "UPDATE {0} SET moreWorkers=0 ".format(jobTableName) + sqlJ += "WHERE PandaID=:pandaID AND nWorkers IS NOT NULL AND nWorkersLimit IS NOT NULL " + sqlJ += "AND nWorkers>0 " + # set flag + varMap = dict() + varMap[':pandaID'] = 
panda_id + self.execute(sqlJ, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + tmpLog.debug('done with {0}'.format(nRow)) + # return + return nRow + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return None + + # update PQ table + def update_panda_queue_attribute(self, key, value, site_name=None, queue_name=None): + tmpLog = None + try: + # get logger + tmpLog = core_utils.make_logger(_logger, 'site={0} queue={1}'.format(site_name, queue_name), + method_name='update_panda_queue') + tmpLog.debug('start key={0}'.format(key)) + # sql to update + sqlJ = "UPDATE {0} SET {1}=:{1} ".format(pandaQueueTableName, key) + sqlJ += "WHERE " + varMap = dict() + varMap[':{0}'.format(key)] = value + if site_name is not None: + sqlJ += "siteName=:siteName " + varMap[':siteName'] = site_name + else: + sqlJ += "queueName=:queueName " + varMap[':queueName'] = queue_name + # update + self.execute(sqlJ, varMap) + nRow = self.cur.rowcount + # commit + self.commit() + tmpLog.debug('done with {0}'.format(nRow)) + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return False + + # delete orphaned job info + def delete_orphaned_job_info(self): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, + method_name='delete_orphaned_job_info') + tmpLog.debug('start') + # sql to get job info to be deleted + sqlGJ = "SELECT PandaID FROM {0} " + sqlGJ += "WHERE PandaID NOT IN (" + sqlGJ += "SELECT PandaID FROM {1}) " + # sql to delete job info + sqlDJ = "DELETE FROM {0} " + sqlDJ += "WHERE PandaID=:PandaID " + # sql to delete files + sqlDF = "DELETE FROM {0} ".format(fileTableName) + sqlDF += "WHERE PandaID=:PandaID " + # sql to delete events + sqlDE = "DELETE FROM {0} ".format(eventTableName) + sqlDE += "WHERE PandaID=:PandaID " + # sql to delete relations + sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName) + sqlDR += "WHERE PandaID=:PandaID " + # loop over all tables + for tableName in [fileTableName, eventTableName, jobWorkerTableName]: + # get job info + self.execute(sqlGJ.format(tableName, jobTableName)) + resGJ = self.cur.fetchall() + nDel = 0 + for pandaID, in resGJ: + # delete + varMap = dict() + varMap[':PandaID'] = pandaID + self.execute(sqlDJ.format(tableName), varMap) + iDel = self.cur.rowcount + if iDel > 0: + nDel += iDel + # commit + self.commit() + tmpLog.debug('deleted {0} records from {1}'.format(nDel, tableName)) + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # lock worker again to feed events + def lock_worker_again_to_feed_events(self, worker_id, locked_by): + try: + tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id), + method_name='lock_worker_again_to_feed_events') + tmpLog.debug('start id={0}'.format(locked_by)) + # check lock + sqlC = "SELECT eventFeedLock,eventFeedTime FROM {0} ".format(workTableName) + sqlC += "WHERE workerID=:workerID " + sqlC += "FOR UPDATE " + varMap = dict() + varMap[':workerID'] = worker_id + self.execute(sqlC, varMap) + resC = self.cur.fetchone() + if resC is None: + retVal = False + tmpLog.debug('not found') + else: + oldLockedBy, oldLockedTime = resC + if oldLockedBy != locked_by: + tmpLog.debug('locked by another {0} at {1}'.format(oldLockedBy, oldLockedTime)) + retVal = False + else: + # update locked time + sqlU = "UPDATE {0} SET eventFeedTime=:timeNow 
WHERE workerID=:workerID ".format(workTableName) + varMap = dict() + varMap[':workerID'] = worker_id + varMap[':timeNow'] = datetime.datetime.utcnow() + self.execute(sqlU, varMap) + retVal = True + # commit + self.commit() + tmpLog.debug('done with {0}'.format(retVal)) + # return + return retVal + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # insert service metrics + def insert_service_metrics(self, service_metric_spec): + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='insert_service_metrics') + tmpLog.debug('start') + try: + sql = "INSERT INTO {0} ({1}) ".format(serviceMetricsTableName, ServiceMetricSpec.column_names()) + sql += ServiceMetricSpec.bind_values_expression() + var_map = service_metric_spec.values_list() + + self.execute(sql, var_map) + self.commit() + + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(tmpLog) + # return + return False + + # get service metrics + def get_service_metrics(self, last_update): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_service_metrics') + tmpLog.debug('start with last_update: {0}'.format(last_update)) + sql = "SELECT creationTime, hostName, metrics FROM {0} ".format(serviceMetricsTableName) + sql += "WHERE creationTime>=:last_update " + + var_map = {':last_update': last_update} + self.execute(sql, var_map) + res = self.cur.fetchall() + + # change datetime objects to strings for json serialization later + res_corrected = [] + for entry in res: + try: + res_corrected.append([entry[0].strftime('%Y-%m-%d %H:%M:%S.%f'), entry[1], entry[2]]) + except Exception: + pass + + # commit + self.commit() + tmpLog.debug('got {0}'.format(str(res))) + return res_corrected + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return {} + + # release a site + def release_site(self, site_name, locked_by): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='release_site') + tmpLog.debug('start') + # sql to release site + sql = "UPDATE {0} SET lockedBy=NULL ".format(pandaQueueTableName) + sql += "WHERE siteName=:siteName AND lockedBy=:lockedBy " + # release site + varMap = dict() + varMap[':siteName'] = site_name + varMap[':lockedBy'] = locked_by + self.execute(sql, varMap) + n_done = self.cur.rowcount > 0 + # commit + self.commit() + if n_done >= 1: + tmpLog.debug('released {0}'.format(site_name)) + else: + tmpLog.debug('found nothing to release. 
Skipped'.format(site_name)) + # return + return True + except Exception: + # roll back + self.rollback() + # dump error + core_utils.dump_error_message(_logger) + # return + return False + + # get workers via workerID + def get_workers_from_ids(self, ids): + try: + # get logger + tmpLog = core_utils.make_logger(_logger, method_name='get_workers_from_ids') + tmpLog.debug('start') + # sql to get workers + sqlW = ( + "SELECT workerID,configID,mapType FROM {workTableName} " + "WHERE workerID IN ({ids_str}) " + "AND status IN (:st_submitted,:st_running,:st_idle) " + ).format(workTableName=workTableName, ids_str=','.join([ str(_) for _ in ids])) + # sql to get associated workerIDs + sqlA = ( + "SELECT t.workerID FROM {jobWorkerTableName} t, {jobWorkerTableName} s, {workTableName} w " + "WHERE s.PandaID=t.PandaID AND s.workerID=:workerID " + "AND w.workerID=t.workerID AND w.status IN (:st_submitted,:st_running,:st_idle) " + ).format(jobWorkerTableName=jobWorkerTableName, workTableName=workTableName) + # sql to get associated workers + sqlG = ( + "SELECT {0} FROM {1} " + "WHERE workerID=:workerID " + ).format(WorkSpec.column_names(), workTableName) + # sql to get associated PandaIDs + sqlP = ( + "SELECT PandaID FROM {0} " + "WHERE workerID=:workerID " + ).format(jobWorkerTableName) + # get workerIDs + timeNow = datetime.datetime.utcnow() + varMap = dict() + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + self.execute(sqlW, varMap) + resW = self.cur.fetchall() + tmpWorkers = set() + for workerID, configID, mapType in resW: + # ignore configID + if not core_utils.dynamic_plugin_change(): + configID = None + tmpWorkers.add((workerID, configID, mapType)) + checkedIDs = set() + retVal = {} + for workerID, configID, mapType in tmpWorkers: + # skip + if workerID in checkedIDs: + continue + # get associated workerIDs + varMap = dict() + varMap[':workerID'] = workerID + varMap[':st_submitted'] = WorkSpec.ST_submitted + varMap[':st_running'] = WorkSpec.ST_running + varMap[':st_idle'] = WorkSpec.ST_idle + self.execute(sqlA, varMap) + resA = self.cur.fetchall() + workerIDtoScan = set() + for tmpWorkID, in resA: + workerIDtoScan.add(tmpWorkID) + # add original ID just in case since no relation when job is not yet bound + workerIDtoScan.add(workerID) + # use only the largest worker to avoid updating the same worker set concurrently + if mapType == WorkSpec.MT_MultiWorkers: + if workerID != min(workerIDtoScan): + continue + # get workers + queueName = None + workersList = [] + for tmpWorkID in workerIDtoScan: + checkedIDs.add(tmpWorkID) + # get worker + varMap = dict() + varMap[':workerID'] = tmpWorkID + self.execute(sqlG, varMap) + resG = self.cur.fetchone() + workSpec = WorkSpec() + workSpec.pack(resG) + if queueName is None: + queueName = workSpec.computingSite + workersList.append(workSpec) + # get associated PandaIDs + varMap = dict() + varMap[':workerID'] = tmpWorkID + self.execute(sqlP, varMap) + resP = self.cur.fetchall() + workSpec.pandaid_list = [] + for tmpPandaID, in resP: + workSpec.pandaid_list.append(tmpPandaID) + if len(workSpec.pandaid_list) > 0: + workSpec.nJobs = len(workSpec.pandaid_list) + # commit + self.commit() + # add + if queueName is not None: + retVal.setdefault(queueName, dict()) + retVal[queueName].setdefault(configID, []) + retVal[queueName][configID].append(workersList) + tmpLog.debug('got {0}'.format(str(retVal))) + return retVal + except Exception: + # roll back + self.rollback() + # dump 
error + core_utils.dump_error_message(_logger) + # return + return {} + +from django.test import TestCase +from django.core.exceptions import ValidationError +from django.contrib.flatpages.models import FlatPage + +from oscar.core.validators import ExtendedURLValidator +from oscar.core.validators import URLDoesNotExistValidator + + +class TestExtendedURLValidatorWithVerifications(TestCase): + """ + ExtendedURLValidator with verify_exists=True + """ + + def setUp(self): + self.validator = ExtendedURLValidator(verify_exists=True) + + def test_validates_local_url(self): + try: + self.validator('/') + except ValidationError: + self.fail('ExtendedURLValidator raised ValidationError' + 'unexpectedly!') + + def test_validates_local_url_with_query_strings(self): + try: + self.validator('/?q=test') # Query strings shouldn't affect validation + except ValidationError: + self.fail('ExtendedURLValidator raised ValidationError' + 'unexpectedly!') + + def test_raises_validation_error_for_missing_urls(self): + with self.assertRaises(ValidationError): + self.validator('/invalid/') + + def test_validates_urls_missing_preceding_slash(self): + try: + self.validator('catalogue/') + except ValidationError: + self.fail('ExtendedURLValidator raised ValidationError' + 'unexpectedly!') + + def test_raises_validation_error_for_urls_without_trailing_slash(self): + with self.assertRaises(ValidationError): + self.validator('/catalogue') # Missing the / is bad + + def test_validates_flatpages_urls(self): + FlatPage.objects.create(title='test page', url='/test/page/') + try: + self.validator('/test/page/') + except ValidationError: + self.fail('ExtendedURLValidator raises ValidationError' + 'unexpectedly!') + + +class TestExtendedURLValidatorWithoutVerifyExists(TestCase): + """ + ExtendedURLValidator with verify_exists=False + """ + + def setUp(self): + self.validator = URLDoesNotExistValidator() + + def test_raises_exception_for_local_urls(self): + self.assertRaises(ValidationError, self.validator, '/') + + def test_raises_exception_for_flatpages(self): + FlatPage.objects.create(title='test page', url='/test/page/') + self.assertRaises(ValidationError, self.validator, '/test/page/') + +# -*- coding:utf-8 -*- + +""" +宏观经济数据接口 +Created on 2015/01/24 +@author: Jimmy Liu +@group : waditu +@contact: jimmysoa@sina.cn +""" + +import pandas as pd +import numpy as np +import re +import json +from tushare.stock import macro_vars as vs +from tushare.stock import cons as ct +try: + from urllib.request import urlopen, Request +except ImportError: + from urllib2 import urlopen, Request + + +def get_gdp_year(): + """ + 获取年度国内生产总值数据 + Return + -------- + DataFrame + year :统计年度 + gdp :国内生产总值(亿元) + pc_gdp :人均国内生产总值(元) + gnp :国民生产总值(亿元) + pi :第一产业(亿元) + si :第二产业(亿元) + industry :工业(亿元) + cons_industry :建筑业(亿元) + ti :第三产业(亿元) + trans_industry :交通运输仓储邮电通信业(亿元) + lbdy :批发零售贸易及餐饮业(亿元) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[0], 0, 70, + rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + datastr = datastr.replace('"', '').replace('null', '0') + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.GDP_YEAR_COLS) + df[df==0] = np.NaN + return df + + +def get_gdp_quarter(): + """ + 获取季度国内生产总值数据 + Return + -------- + DataFrame + quarter :季度 + gdp :国内生产总值(亿元) + gdp_yoy 
:国内生产总值同比增长(%) + pi :第一产业增加值(亿元) + pi_yoy:第一产业增加值同比增长(%) + si :第二产业增加值(亿元) + si_yoy :第二产业增加值同比增长(%) + ti :第三产业增加值(亿元) + ti_yoy :第三产业增加值同比增长(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[0], 1, 250, + rdint)) + text = urlopen(request,timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + datastr = datastr.replace('"', '').replace('null', '0') + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.GDP_QUARTER_COLS) + df['quarter'] = df['quarter'].astype(object) + df[df==0] = np.NaN + return df + + +def get_gdp_for(): + """ + 获取三大需求对GDP贡献数据 + Return + -------- + DataFrame + year :统计年度 + end_for :最终消费支出贡献率(%) + for_rate :最终消费支出拉动(百分点) + asset_for :资本形成总额贡献率(%) + asset_rate:资本形成总额拉动(百分点) + goods_for :货物和服务净出口贡献率(%) + goods_rate :货物和服务净出口拉动(百分点) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[0], 4, 80, rdint)) + text = urlopen(request,timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + datastr = datastr.replace('"','').replace('null','0') + js = json.loads(datastr) + df = pd.DataFrame(js,columns=vs.GDP_FOR_COLS) + df[df==0] = np.NaN + return df + + +def get_gdp_pull(): + """ + 获取三大产业对GDP拉动数据 + Return + -------- + DataFrame + year :统计年度 + gdp_yoy :国内生产总值同比增长(%) + pi :第一产业拉动率(%) + si :第二产业拉动率(%) + industry:其中工业拉动(%) + ti :第三产业拉动率(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[0], 5, 60, rdint)) + text = urlopen(request,timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + datastr = datastr.replace('"', '').replace('null', '0') + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.GDP_PULL_COLS) + df[df==0] = np.NaN + return df + + +def get_gdp_contrib(): + """ + 获取三大产业贡献率数据 + Return + -------- + DataFrame + year :统计年度 + gdp_yoy :国内生产总值 + pi :第一产业献率(%) + si :第二产业献率(%) + industry:其中工业献率(%) + ti :第三产业献率(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], rdint, + vs.MACRO_TYPE[0], 6, 60, rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + datastr = datastr.replace('"', '').replace('null', '0') + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.GDP_CONTRIB_COLS) + df[df==0] = np.NaN + return df + +def get_cpi(): + """ + 获取居民消费价格指数数据 + Return + -------- + DataFrame + month :统计月份 + cpi :价格指数 + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[1], 0, 600, + rdint)) + text = urlopen(request,timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.CPI_COLS) + df['cpi'] = 
df['cpi'].astype(float) + return df + + +def get_ppi(): + """ + 获取工业品出厂价格指数数据 + Return + -------- + DataFrame + month :统计月份 + ppiip :工业品出厂价格指数 + ppi :生产资料价格指数 + qm:采掘工业价格指数 + rmi:原材料工业价格指数 + pi:加工工业价格指数 + cg:生活资料价格指数 + food:食品类价格指数 + clothing:衣着类价格指数 + roeu:一般日用品价格指数 + dcg:耐用消费品价格指数 + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[1], 3, 600, + rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') if ct.PY3 else text + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.PPI_COLS) + for i in df.columns: + df[i] = df[i].apply(lambda x:np.where(x is None, np.NaN, x)) + if i != 'month': + df[i] = df[i].astype(float) + return df + + +def get_deposit_rate(): + """ + 获取存款利率数据 + Return + -------- + DataFrame + date :变动日期 + deposit_type :存款种类 + rate:利率(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[2], 2, 600, + rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.DEPOSIT_COLS) + for i in df.columns: + df[i] = df[i].apply(lambda x:np.where(x is None, '--', x)) + return df + + +def get_loan_rate(): + """ + 获取贷款利率数据 + Return + -------- + DataFrame + date :执行日期 + loan_type :存款种类 + rate:利率(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[2], 3, 800, + rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.LOAN_COLS) + for i in df.columns: + df[i] = df[i].apply(lambda x:np.where(x is None, '--', x)) + return df + + +def get_rrr(): + """ + 获取存款准备金率数据 + Return + -------- + DataFrame + date :变动日期 + before :调整前存款准备金率(%) + now:调整后存款准备金率(%) + changed:调整幅度(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[2], 4, 100, + rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.RRR_COLS) + for i in df.columns: + df[i] = df[i].apply(lambda x:np.where(x is None, '--', x)) + return df + + +def get_money_supply(): + """ + 获取货币供应量数据 + Return + -------- + DataFrame + month :统计时间 + m2 :货币和准货币(广义货币M2)(亿元) + m2_yoy:货币和准货币(广义货币M2)同比增长(%) + m1:货币(狭义货币M1)(亿元) + m1_yoy:货币(狭义货币M1)同比增长(%) + m0:流通中现金(M0)(亿元) + m0_yoy:流通中现金(M0)同比增长(%) + cd:活期存款(亿元) + cd_yoy:活期存款同比增长(%) + qm:准货币(亿元) + qm_yoy:准货币同比增长(%) + ftd:定期存款(亿元) + ftd_yoy:定期存款同比增长(%) + sd:储蓄存款(亿元) + sd_yoy:储蓄存款同比增长(%) + rests:其他存款(亿元) + rests_yoy:其他存款同比增长(%) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[2], 1, 600, + rdint)) + text = urlopen(request, timeout=10).read() + text = text.decode('gbk') + regSym = re.compile(r'\,count:(.*?)\}') + datastr = 
regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_COLS) + for i in df.columns: + df[i] = df[i].apply(lambda x:np.where(x is None, '--', x)) + return df + + +def get_money_supply_bal(): + """ + 获取货币供应量(年底余额)数据 + Return + -------- + DataFrame + year :统计年度 + m2 :货币和准货币(亿元) + m1:货币(亿元) + m0:流通中现金(亿元) + cd:活期存款(亿元) + qm:准货币(亿元) + ftd:定期存款(亿元) + sd:储蓄存款(亿元) + rests:其他存款(亿元) + """ + rdint = vs.random() + request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], + rdint, vs.MACRO_TYPE[2], 0, 200, + rdint)) + text = urlopen(request,timeout=10).read() + text = text.decode('gbk') + regSym = re.compile(r'\,count:(.*?)\}') + datastr = regSym.findall(text) + datastr = datastr[0] + datastr = datastr.split('data:')[1] + js = json.loads(datastr) + df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_BLA_COLS) + for i in df.columns: + df[i] = df[i].apply(lambda x:np.where(x is None, '--', x)) + return df + +import datetime +from decimal import Decimal +import types +import six + + +def is_protected_type(obj): + """Determine if the object instance is of a protected type. + + Objects of protected types are preserved as-is when passed to + force_unicode(strings_only=True). + """ + return isinstance(obj, ( + six.integer_types + + (types.NoneType, + datetime.datetime, datetime.date, datetime.time, + float, Decimal)) + ) + + +def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): + """ + Similar to smart_text, except that lazy instances are resolved to + strings, rather than kept as lazy objects. + + If strings_only is True, don't convert (some) non-string-like objects. + """ + # Handle the common case first, saves 30-40% when s is an instance of + # six.text_type. This function gets called often in that setting. + if isinstance(s, six.text_type): + return s + if strings_only and is_protected_type(s): + return s + try: + if not isinstance(s, six.string_types): + if hasattr(s, '__unicode__'): + s = s.__unicode__() + else: + if six.PY3: + if isinstance(s, bytes): + s = six.text_type(s, encoding, errors) + else: + s = six.text_type(s) + else: + s = six.text_type(bytes(s), encoding, errors) + else: + # Note: We use .decode() here, instead of six.text_type(s, + # encoding, errors), so that if s is a SafeBytes, it ends up being + # a SafeText at the end. + s = s.decode(encoding, errors) + except UnicodeDecodeError as e: + if not isinstance(s, Exception): + raise UnicodeDecodeError(*e.args) + else: + # If we get to here, the caller has passed in an Exception + # subclass populated with non-ASCII bytestring data without a + # working unicode method. Try to handle this without raising a + # further exception by individually forcing the exception args + # to unicode. + s = ' '.join([force_unicode(arg, encoding, strings_only, + errors) for arg in s]) + return s + +############################################################################## +# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/spack/spack +# Please also see the NOTICE and LICENSE files for our notice and the LGPL. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from spack import * + + +class PyConfigparser(PythonPackage): + """This library brings the updated configparser from Python 3.5 to + Python 2.6-3.5.""" + + homepage = "https://docs.python.org/3/library/configparser.html" + url = "https://pypi.io/packages/source/c/configparser/configparser-3.5.0.tar.gz" + + version('3.5.0', 'cfdd915a5b7a6c09917a64a573140538') + + depends_on('py-setuptools', type='build') + + # This dependency breaks concretization + # See https://github.com/spack/spack/issues/2793 + # depends_on('py-ordereddict', when='^python@:2.6', type=('build', 'run')) + +### +# Copyright (c) 2005, Jeremiah Fincher +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions, and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions, and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the author of this software nor the name of +# contributors to this software may be used to endorse or promote products +# derived from this software without specific prior written consent. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+### + +from supybot.test import * + +class AutoModeTestCase(PluginTestCase): + plugins = ('AutoMode',) + + +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 2 22:08:22 2014 + +@author: remi +""" + +#trying to order points by octree with python +from numpy import random, sqrt +from sklearn import preprocessing +import matplotlib.pyplot as plt + +#defining a dummy entry :a random 3D pointcloud +pointcloud = random.rand(16*16,2); +index = np.arange(1,16*16+1) + +#parameters +tot_level = 3 ; + +#centering data so that leftmost pint is 0 abs, bottom most point is 0 + +pointcloud[:,0] = pointcloud[:,0]- np.amin(pointcloud[:,0]); +pointcloud[:,1] = pointcloud[:,1]- np.amin(pointcloud[:,1]); + +#finding the max scaling, in X, Y or Z +max_r = max(np.amax(pointcloud[:,0])-np.amin(pointcloud[:,0]), np.amax(pointcloud[:,1])-np.amin(pointcloud[:,1])) + +#dividing so max scale is 0 . Now the point cloud is between 0,1 and 0,1 +pointcloud = pointcloud/ max_r ; + +#we have to trick a litlle, so has that for level 3 for instance, all value are between 0 and 7 included, but not reaching 8. + +pointcloud_int = np.trunc(abs((pointcloud*pow(2,tot_level)-0.0001))).astype(int) + + +plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro') ; +plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro') ; +plt.axis([-1, 8, -1, 8]) ; +plt.show() ; +plt.close('all'); + +result_point = pointcloud_int[rec_ar[:,0]] +plt.plot(result_point[:,0],result_point[:,1], 'ro') ; + + +rec_ar = np.array(rec) +piv_ar = np.array(piv) +plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ; + + +np.binary_repr(1) +def bin(s): + return str(s) if s<=1 else bin(s>>1) + str(s&1) + +def testBit(int_type, offset): + mask = 1 << offset + return( (int_type & mask)>0 ) +testBit(8,1) +pointcloud_bin = np.binary_repr(pointcloud_int) + + +pointcloud_int >> (tot_level-1) ; +#np.binary_repr(8) +( ((pointcloud_int >> 1 ) << 1) ) >> (tot_level-1) ; +testBit(pointcloud_int[:,1],3) +#cut the input point cloud into 8 based on l bit value starting form right to left +point_cloud_0_0_mask = np.logical_and((testBit(pointcloud_int[:,0],2)==0) , (testBit(pointcloud_int[:,1],2)==0) ) ; +pivot = np.array([pow(2,tot_level-1),pow(2,tot_level-1)]) +pointcloud_centered = pointcloud_int - pivot + +#coordinate to work : + +toto = np.array([1,2,3]) +testBit(toto,1) + +(pointcloud_int >>1 )>>5 + +pow(2,4) +1<<4 + # + +# level 0 +result = list() ; +pointcloud_int ; +index +pivot +cur_lev = 0 +rec = []; + + +#find the 0 level point +min_point = np.argmin(np.sum(np.abs(pointcloud_int - pivot ),axis=1)) +result.append(list((index[min_point],cur_lev))) +#compute the 4 sub parts +for b_x in list((0,1)) : + for b_y in list((0,1)) : + #looping on all 4 sub parts + print b_x, b_y + rec.append (np.logical_and( + (testBit(pointcloud_int[:,0],2)>0)==b_x + ,(testBit(pointcloud_int[:,1],2)>0)==b_y + ) + ) + testBit(pointcloud_int[:,0],2) + print (testBit(pointcloud_int[:,0],2)>0==b_x) ; + print (testBit(pointcloud_int[:,1],2)>0==b_y) ; + rec[b_x,b_y] = np.logical_and((testBit(pointcloud_int[:,0],2)>0==b_x) + ,(testBit(pointcloud_int[:,1],2)>0==b_y) ) + print rec +np.binary_repr(pointcloud_int[:,0] ) +#givne a point cloud +#compute the closest to center + + +def recursive_octree_ordering(point_array,index_array, center_point, level,tot_level, result,piv): + #importing necessary lib + import numpy as np; + + #print for debug + # print '\n\n working on level : '+str(level); + # print 'input points: \n\t',point_array ; + # print 'index_array : 
\n\t',index_array; + # print 'center_point : \n\t',center_point; + # print 'level : \n\t',level; + # print 'tot_level : \n\t',tot_level; + # print 'result : \n\t',result; + #stopping condition : no points: + + if len(point_array) == 0|level<=2: + return; + #updatig level; + sub_part_level = level+1 ; + + print 'level ',level,' , points remaining : ',len(point_array) ; + print center_point; + piv.append(center_point); + + + #find the closest point to pivot + min_point = np.argmin(np.sum(np.abs(point_array - center_point ),axis=1)) + result.append(list((index_array[min_point],level))) ; + #removing the found point from the array of points + #np.delete(point_array, min_point, axis=0) ; + #np.delete(index_array, min_point, axis=0) ; + + #stopping if it remains only one pioint : we won't divide further, same if we have reached max depth + if (len(point_array) ==1 )|(level >= tot_level): + return; + #compute the 4 sub parts + for b_x in list((0,1)) : + for b_y in list((0,1)) : + #looping on all 4 sub parts + print (b_x*2-1), (b_y*2-1) ; + udpate_to_pivot = np.asarray([ (b_x*2-1)*(pow(2,tot_level - level -2 )) + ,(b_y*2-1)*(pow(2,tot_level - level -2 )) + ]); + sub_part_center_point = center_point +udpate_to_pivot; + + + + # we want to iterateon + # we need to update : : point_array , index_array center_point , level + #update point_array and index_array : we need to find the points that are in the subparts + #update center point, we need to add/substract to previous pivot 2^level+11 + + #find the points concerned : + point_in_subpart_mask = np.logical_and( + testBit(point_array[:,0],tot_level - level-1) ==b_x + , testBit(point_array[:,1],tot_level - level -1) ==b_y ) ; + sub_part_points= point_array[point_in_subpart_mask]; + sub_part_index = index_array[point_in_subpart_mask]; + sub_part_center_point = center_point + np.asarray([ + (b_x*2-1)*(pow(2,tot_level - level -2 )) + ,(b_y*2-1)*(pow(2,tot_level - level -2 )) + ]); + + + if len(sub_part_points)>=1: + recursive_octree_ordering(sub_part_points + ,sub_part_index + , sub_part_center_point + , sub_part_level + , tot_level + , result + , piv); + continue; + else: + print 'at televel ',level,'bx by:',b_x,' ',b_y,' refusing to go one, ', len(sub_part_points), ' points remaining fo this' + continue; + +rec = [] ; +piv = [] ; +recursive_octree_ordering(pointcloud_int,index,pivot,0,3,rec, piv ); +#recursive_octree_ordering(pointcloud_int,index, np.array([2,2]),1,3,rec, piv ); +piv_ar = np.array(piv) +plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ; + + +plot(x=pointcloud_int[:,0].T,y=pointcloud_int[:,1].T, marker='o', color='r', ls='' ) +plt.plot(pointcloud_int.T, marker='o', color='r', ls='') + +plt.imsave('/') + +from mpl_toolkits.mplot3d import Axes3D + +plt.scatter(pointcloud[:,0], pointcloud[:,1],c='red'); +plt.scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green'); +plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro') +plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro') +plt.axis([-1, 8, -1, 8]) +plt.show(); + +fig = plt.figure() +ax = fig.add_subplot(111) +ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1]); +ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1], pointcloud_int[:,0], zdir='z', c= 'red') +fig.show() + + +fig, axes = plt.subplots(1, 2, figsize=(12,3)) +axes[0].scatter(pointcloud[:,0], pointcloud[:,1],c='red'); +axes[1].scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green'); +fig.show(); + +for f in list((0,1)): + (f*2-1) + + +import octree_ordering +# -*- encoding: utf-8 -*- 
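# ---------------------------------------------------------------------------
# Hedged, self-contained sketch (added for illustration, not taken from the
# octree script above): a cleaned-up Python 3 version of the quadtree ("2D
# octree") point ordering that script experiments with. Function and variable
# names (test_bit, quadtree_order, cloud_int, ...) are illustrative choices;
# the bit-split and center-offset rule mirrors the recursion sketched above.
import numpy as np


def test_bit(values, offset):
    # elementwise check of whether bit `offset` is set in each integer value
    return (values & (1 << offset)) > 0


def quadtree_order(points, indices, center, level, tot_level, result):
    """Append (original index, level) pairs: in each cell the point closest to
    the cell center is emitted first, then the four child cells are visited."""
    if len(points) == 0 or level > tot_level:
        return
    closest = np.argmin(np.sum(np.abs(points - center), axis=1))
    result.append((int(indices[closest]), level))
    if len(points) == 1 or level >= tot_level:
        return
    bit = tot_level - level - 1      # next coordinate bit to split on
    half = 2.0 ** (bit - 1)          # offset from parent center to child centers
    for bx in (0, 1):
        for by in (0, 1):
            mask = ((test_bit(points[:, 0], bit) == bool(bx)) &
                    (test_bit(points[:, 1], bit) == bool(by)))
            if not mask.any():
                continue
            child_center = center + np.array([(2 * bx - 1) * half,
                                              (2 * by - 1) * half])
            quadtree_order(points[mask], indices[mask], child_center,
                           level + 1, tot_level, result)


if __name__ == '__main__':
    # quantize a random cloud onto a 2**tot_level grid, as the script above does
    tot_level = 3
    cloud = np.random.rand(256, 2)
    cloud -= cloud.min(axis=0)
    cloud /= cloud.max()
    cloud_int = np.trunc(cloud * 2 ** tot_level - 1e-4).astype(int)
    order = []
    quadtree_order(cloud_int, np.arange(len(cloud_int)),
                   np.full(2, 2.0 ** (tot_level - 1)), 0, tot_level, order)
    print(order[:8])
# ---------------------------------------------------------------------------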
+############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2011 Numérigraphe SARL. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +############################################################################## + +import bank + +# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: + + +# -*- coding: utf-8 -*- +# Part of Odoo. See LICENSE file for full copyright and licensing details. + +import datetime +import logging + +import requests +import werkzeug.urls + +from ast import literal_eval + +from odoo import api, release, SUPERUSER_ID +from odoo.exceptions import UserError +from odoo.models import AbstractModel +from odoo.tools.translate import _ +from odoo.tools import config, misc, ustr + +_logger = logging.getLogger(__name__) + + +class PublisherWarrantyContract(AbstractModel): + _name = "publisher_warranty.contract" + + @api.model + def _get_message(self): + Users = self.env['res.users'] + IrParamSudo = self.env['ir.config_parameter'].sudo() + + dbuuid = IrParamSudo.get_param('database.uuid') + db_create_date = IrParamSudo.get_param('database.create_date') + limit_date = datetime.datetime.now() + limit_date = limit_date - datetime.timedelta(15) + limit_date_str = limit_date.strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT) + nbr_users = Users.search_count([('active', '=', True)]) + nbr_active_users = Users.search_count([("login_date", ">=", limit_date_str), ('active', '=', True)]) + nbr_share_users = 0 + nbr_active_share_users = 0 + if "share" in Users._fields: + nbr_share_users = Users.search_count([("share", "=", True), ('active', '=', True)]) + nbr_active_share_users = Users.search_count([("share", "=", True), ("login_date", ">=", limit_date_str), ('active', '=', True)]) + user = self.env.user + domain = [('application', '=', True), ('state', 'in', ['installed', 'to upgrade', 'to remove'])] + apps = self.env['ir.module.module'].sudo().search_read(domain, ['name']) + + enterprise_code = IrParamSudo.get_param('database.enterprise_code') + + web_base_url = IrParamSudo.get_param('web.base.url') + msg = { + "dbuuid": dbuuid, + "nbr_users": nbr_users, + "nbr_active_users": nbr_active_users, + "nbr_share_users": nbr_share_users, + "nbr_active_share_users": nbr_active_share_users, + "dbname": self._cr.dbname, + "db_create_date": db_create_date, + "version": release.version, + "language": user.lang, + "web_base_url": web_base_url, + "apps": [app['name'] for app in apps], + "enterprise_code": enterprise_code, + } + if user.partner_id.company_id: + company_id = user.partner_id.company_id + msg.update(company_id.read(["name", "email", "phone"])[0]) + return msg + + @api.model + def _get_sys_logs(self): + """ + Utility method to send a publisher warranty get logs messages. 
+ """ + msg = self._get_message() + arguments = {'arg0': ustr(msg), "action": "update"} + + url = config.get("publisher_warranty_url") + + r = requests.post(url, data=arguments, timeout=30) + r.raise_for_status() + return literal_eval(r.text) + + @api.multi + def update_notification(self, cron_mode=True): + """ + Send a message to Odoo's publisher warranty server to check the + validity of the contracts, get notifications, etc... + + @param cron_mode: If true, catch all exceptions (appropriate for usage in a cron). + @type cron_mode: boolean + """ + try: + try: + result = self._get_sys_logs() + except Exception: + if cron_mode: # we don't want to see any stack trace in cron + return False + _logger.debug("Exception while sending a get logs messages", exc_info=1) + raise UserError(_("Error during communication with the publisher warranty server.")) + # old behavior based on res.log; now on mail.message, that is not necessarily installed + user = self.env['res.users'].sudo().browse(SUPERUSER_ID) + poster = self.sudo().env.ref('mail.channel_all_employees') + if not (poster and poster.exists()): + if not user.exists(): + return True + poster = user + for message in result["messages"]: + try: + poster.message_post(body=message, subtype='mt_comment', partner_ids=[user.partner_id.id]) + except Exception: + pass + if result.get('enterprise_info'): + # Update expiration date + set_param = self.env['ir.config_parameter'].sudo().set_param + set_param('database.expiration_date', result['enterprise_info'].get('expiration_date')) + set_param('database.expiration_reason', result['enterprise_info'].get('expiration_reason', 'trial')) + set_param('database.enterprise_code', result['enterprise_info'].get('enterprise_code')) + + except Exception: + if cron_mode: + return False # we don't want to see any stack trace in cron + else: + raise + return True + +"""Support for functionality to have conversations with Home Assistant.""" +import logging +import re + +import voluptuous as vol + +from homeassistant import core +from homeassistant.components import http, websocket_api +from homeassistant.components.http.data_validator import RequestDataValidator +from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR +from homeassistant.helpers import config_validation as cv, intent +from homeassistant.loader import bind_hass + +from .agent import AbstractConversationAgent +from .default_agent import DefaultAgent, async_register + +_LOGGER = logging.getLogger(__name__) + +ATTR_TEXT = "text" + +DOMAIN = "conversation" + +REGEX_TYPE = type(re.compile("")) +DATA_AGENT = "conversation_agent" +DATA_CONFIG = "conversation_config" + +SERVICE_PROCESS = "process" + +SERVICE_PROCESS_SCHEMA = vol.Schema({vol.Required(ATTR_TEXT): cv.string}) + +CONFIG_SCHEMA = vol.Schema( + { + DOMAIN: vol.Schema( + { + vol.Optional("intents"): vol.Schema( + {cv.string: vol.All(cv.ensure_list, [cv.string])} + ) + } + ) + }, + extra=vol.ALLOW_EXTRA, +) + +async_register = bind_hass(async_register) + + +@core.callback +@bind_hass +def async_set_agent(hass: core.HomeAssistant, agent: AbstractConversationAgent): + """Set the agent to handle the conversations.""" + hass.data[DATA_AGENT] = agent + + +async def async_setup(hass, config): + """Register the process service.""" + hass.data[DATA_CONFIG] = config + + async def handle_service(service): + """Parse text into commands.""" + text = service.data[ATTR_TEXT] + _LOGGER.debug("Processing: <%s>", text) + agent = await _get_agent(hass) + try: + await agent.async_process(text, service.context) + except 
intent.IntentHandleError as err: + _LOGGER.error("Error processing %s: %s", text, err) + + hass.services.async_register( + DOMAIN, SERVICE_PROCESS, handle_service, schema=SERVICE_PROCESS_SCHEMA + ) + hass.http.register_view(ConversationProcessView()) + hass.components.websocket_api.async_register_command(websocket_process) + hass.components.websocket_api.async_register_command(websocket_get_agent_info) + hass.components.websocket_api.async_register_command(websocket_set_onboarding) + + return True + + +@websocket_api.async_response +@websocket_api.websocket_command( + {"type": "conversation/process", "text": str, vol.Optional("conversation_id"): str} +) +async def websocket_process(hass, connection, msg): + """Process text.""" + connection.send_result( + msg["id"], + await _async_converse( + hass, msg["text"], msg.get("conversation_id"), connection.context(msg) + ), + ) + + +@websocket_api.async_response +@websocket_api.websocket_command({"type": "conversation/agent/info"}) +async def websocket_get_agent_info(hass, connection, msg): + """Do we need onboarding.""" + agent = await _get_agent(hass) + + connection.send_result( + msg["id"], + { + "onboarding": await agent.async_get_onboarding(), + "attribution": agent.attribution, + }, + ) + + +@websocket_api.async_response +@websocket_api.websocket_command({"type": "conversation/onboarding/set", "shown": bool}) +async def websocket_set_onboarding(hass, connection, msg): + """Set onboarding status.""" + agent = await _get_agent(hass) + + success = await agent.async_set_onboarding(msg["shown"]) + + if success: + connection.send_result(msg["id"]) + else: + connection.send_error(msg["id"]) + + +class ConversationProcessView(http.HomeAssistantView): + """View to process text.""" + + url = "/api/conversation/process" + name = "api:conversation:process" + + @RequestDataValidator( + vol.Schema({vol.Required("text"): str, vol.Optional("conversation_id"): str}) + ) + async def post(self, request, data): + """Send a request for processing.""" + hass = request.app["hass"] + + try: + intent_result = await _async_converse( + hass, data["text"], data.get("conversation_id"), self.context(request) + ) + except intent.IntentError as err: + _LOGGER.error("Error handling intent: %s", err) + return self.json( + { + "success": False, + "error": { + "code": str(err.__class__.__name__).lower(), + "message": str(err), + }, + }, + status_code=HTTP_INTERNAL_SERVER_ERROR, + ) + + return self.json(intent_result) + + +async def _get_agent(hass: core.HomeAssistant) -> AbstractConversationAgent: + """Get the active conversation agent.""" + agent = hass.data.get(DATA_AGENT) + if agent is None: + agent = hass.data[DATA_AGENT] = DefaultAgent(hass) + await agent.async_initialize(hass.data.get(DATA_CONFIG)) + return agent + + +async def _async_converse( + hass: core.HomeAssistant, text: str, conversation_id: str, context: core.Context +) -> intent.IntentResponse: + """Process text and get intent.""" + agent = await _get_agent(hass) + try: + intent_result = await agent.async_process(text, context, conversation_id) + except intent.IntentHandleError as err: + intent_result = intent.IntentResponse() + intent_result.async_set_speech(str(err)) + + if intent_result is None: + intent_result = intent.IntentResponse() + intent_result.async_set_speech("Sorry, I didn't understand that") + + return intent_result + +# -*- coding: utf-8 -*- +# Copyright (c) 2015, Vispy Development Team. +# Distributed under the (new) BSD License. See LICENSE.txt for more info. 
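# Hedged usage sketch for the Home Assistant conversation integration above
# (it refers to ConversationProcessView, not to the vispy module that follows).
# The endpoint path and JSON keys come from the view definition; the base URL
# and the long-lived access token are assumptions to replace with real values.
import requests

HASS_URL = "http://localhost:8123"        # assumed Home Assistant base URL
TOKEN = "YOUR_LONG_LIVED_ACCESS_TOKEN"    # assumed auth token

resp = requests.post(
    HASS_URL + "/api/conversation/process",
    headers={"Authorization": "Bearer " + TOKEN},
    json={"text": "turn on the kitchen light"},  # 'conversation_id' is optional
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # serialized intent response, or an error payload on failure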
+""" +Some wrappers to avoid circular imports, or make certain calls easier. + +The idea of a 'global' vispy.use function is that although vispy.app +and vispy.gloo.gl can be used independently, they are not complely +independent for some configureation. E.g. when using real ES 2.0, +the app backend should use EGL and not a desktop OpenGL context. Also, +we probably want it to be easy to configure vispy to use the ipython +notebook backend, which requires specifc config of both app and gl. + +This module does not have to be aware of the available app and gl +backends, but it should be(come) aware of (in)compatibilities between +them. +""" + +import subprocess +import inspect + + +def use(app=None, gl=None): + """ Set the usage options for vispy + + Specify what app backend and GL backend to use. + + Parameters + ---------- + app : str + The app backend to use (case insensitive). Standard backends: + * 'PyQt4': use Qt widget toolkit via PyQt4. + * 'PyQt5': use Qt widget toolkit via PyQt5. + * 'PySide': use Qt widget toolkit via PySide. + * 'PyGlet': use Pyglet backend. + * 'Glfw': use Glfw backend (successor of Glut). Widely available + on Linux. + * 'SDL2': use SDL v2 backend. + * 'osmesa': Use OSMesa backend + Additional backends: + * 'ipynb_vnc': render in the IPython notebook via a VNC approach + (experimental) + gl : str + The gl backend to use (case insensitive). Options are: + * 'gl2': use Vispy's desktop OpenGL API. + * 'pyopengl2': use PyOpenGL's desktop OpenGL API. Mostly for + testing. + * 'es2': (TO COME) use real OpenGL ES 2.0 on Windows via Angle. + Availability of ES 2.0 is larger for Windows, since it relies + on DirectX. + * 'gl+': use the full OpenGL functionality available on + your system (via PyOpenGL). + + Notes + ----- + If the app option is given, ``vispy.app.use_app()`` is called. If + the gl option is given, ``vispy.gloo.use_gl()`` is called. + + If an app backend name is provided, and that backend could not be + loaded, an error is raised. + + If no backend name is provided, Vispy will first check if the GUI + toolkit corresponding to each backend is already imported, and try + that backend first. If this is unsuccessful, it will try the + 'default_backend' provided in the vispy config. If still not + succesful, it will try each backend in a predetermined order. + + See Also + -------- + vispy.app.use_app + vispy.gloo.gl.use_gl + """ + if app is None and gl is None: + raise TypeError('Must specify at least one of "app" or "gl".') + + # Example for future. This wont work (yet). + if app == 'ipynb_webgl': + app = 'headless' + gl = 'webgl' + + if app == 'osmesa': + from ..util.osmesa_gl import fix_osmesa_gl_lib + fix_osmesa_gl_lib() + if gl is not None: + raise ValueError("Do not specify gl when using osmesa") + + # Apply now + if gl: + from .. import gloo, config + config['gl_backend'] = gl + gloo.gl.use_gl(gl) + if app: + from ..app import use_app + use_app(app) + + +def run_subprocess(command, return_code=False, **kwargs): + """Run command using subprocess.Popen + + Run command and wait for command to complete. If the return code was zero + then return, otherwise raise CalledProcessError. + By default, this will also add stdout= and stderr=subproces.PIPE + to the call to Popen to suppress printing to the terminal. + + Parameters + ---------- + command : list of str + Command to run as subprocess (see subprocess.Popen documentation). 
+ return_code : bool + If True, the returncode will be returned, and no error checking + will be performed (so this function should always return without + error). + **kwargs : dict + Additional kwargs to pass to ``subprocess.Popen``. + + Returns + ------- + stdout : str + Stdout returned by the process. + stderr : str + Stderr returned by the process. + code : int + The command exit code. Only returned if ``return_code`` is True. + """ + # code adapted with permission from mne-python + use_kwargs = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE) + use_kwargs.update(kwargs) + + p = subprocess.Popen(command, **use_kwargs) + output = p.communicate() + + # communicate() may return bytes, str, or None depending on the kwargs + # passed to Popen(). Convert all to unicode str: + output = ['' if s is None else s for s in output] + output = [s.decode('utf-8') if isinstance(s, bytes) else s for s in output] + output = tuple(output) + + if not return_code and p.returncode: + print(output[0]) + print(output[1]) + err_fun = subprocess.CalledProcessError.__init__ + if 'output' in inspect.getargspec(err_fun).args: + raise subprocess.CalledProcessError(p.returncode, command, output) + else: + raise subprocess.CalledProcessError(p.returncode, command) + if return_code: + output = output + (p.returncode,) + return output + +""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp1254', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x04' # 0x04 -> END OF TRANSMISSION + '\x05' # 0x05 -> ENQUIRY + '\x06' # 0x06 -> ACKNOWLEDGE + '\x07' # 0x07 -> BELL + '\x08' # 0x08 -> BACKSPACE + '\t' # 0x09 -> HORIZONTAL TABULATION + '\n' # 0x0A -> LINE FEED + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x14' # 0x14 -> DEVICE CONTROL FOUR + '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x16 -> SYNCHRONOUS IDLE + '\x17' # 0x17 -> END OF TRANSMISSION BLOCK + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x1a' # 0x1A -> SUBSTITUTE + '\x1b' # 0x1B -> ESCAPE + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 
0x1F -> UNIT SEPARATOR + ' ' # 0x20 -> SPACE + '!' # 0x21 -> EXCLAMATION MARK + '"' # 0x22 -> QUOTATION MARK + '#' # 0x23 -> NUMBER SIGN + '$' # 0x24 -> DOLLAR SIGN + '%' # 0x25 -> PERCENT SIGN + '&' # 0x26 -> AMPERSAND + "'" # 0x27 -> APOSTROPHE + '(' # 0x28 -> LEFT PARENTHESIS + ')' # 0x29 -> RIGHT PARENTHESIS + '*' # 0x2A -> ASTERISK + '+' # 0x2B -> PLUS SIGN + ',' # 0x2C -> COMMA + '-' # 0x2D -> HYPHEN-MINUS + '.' # 0x2E -> FULL STOP + '/' # 0x2F -> SOLIDUS + '0' # 0x30 -> DIGIT ZERO + '1' # 0x31 -> DIGIT ONE + '2' # 0x32 -> DIGIT TWO + '3' # 0x33 -> DIGIT THREE + '4' # 0x34 -> DIGIT FOUR + '5' # 0x35 -> DIGIT FIVE + '6' # 0x36 -> DIGIT SIX + '7' # 0x37 -> DIGIT SEVEN + '8' # 0x38 -> DIGIT EIGHT + '9' # 0x39 -> DIGIT NINE + ':' # 0x3A -> COLON + ';' # 0x3B -> SEMICOLON + '<' # 0x3C -> LESS-THAN SIGN + '=' # 0x3D -> EQUALS SIGN + '>' # 0x3E -> GREATER-THAN SIGN + '?' # 0x3F -> QUESTION MARK + '@' # 0x40 -> COMMERCIAL AT + 'A' # 0x41 -> LATIN CAPITAL LETTER A + 'B' # 0x42 -> LATIN CAPITAL LETTER B + 'C' # 0x43 -> LATIN CAPITAL LETTER C + 'D' # 0x44 -> LATIN CAPITAL LETTER D + 'E' # 0x45 -> LATIN CAPITAL LETTER E + 'F' # 0x46 -> LATIN CAPITAL LETTER F + 'G' # 0x47 -> LATIN CAPITAL LETTER G + 'H' # 0x48 -> LATIN CAPITAL LETTER H + 'I' # 0x49 -> LATIN CAPITAL LETTER I + 'J' # 0x4A -> LATIN CAPITAL LETTER J + 'K' # 0x4B -> LATIN CAPITAL LETTER K + 'L' # 0x4C -> LATIN CAPITAL LETTER L + 'M' # 0x4D -> LATIN CAPITAL LETTER M + 'N' # 0x4E -> LATIN CAPITAL LETTER N + 'O' # 0x4F -> LATIN CAPITAL LETTER O + 'P' # 0x50 -> LATIN CAPITAL LETTER P + 'Q' # 0x51 -> LATIN CAPITAL LETTER Q + 'R' # 0x52 -> LATIN CAPITAL LETTER R + 'S' # 0x53 -> LATIN CAPITAL LETTER S + 'T' # 0x54 -> LATIN CAPITAL LETTER T + 'U' # 0x55 -> LATIN CAPITAL LETTER U + 'V' # 0x56 -> LATIN CAPITAL LETTER V + 'W' # 0x57 -> LATIN CAPITAL LETTER W + 'X' # 0x58 -> LATIN CAPITAL LETTER X + 'Y' # 0x59 -> LATIN CAPITAL LETTER Y + 'Z' # 0x5A -> LATIN CAPITAL LETTER Z + '[' # 0x5B -> LEFT SQUARE BRACKET + '\\' # 0x5C -> REVERSE SOLIDUS + ']' # 0x5D -> RIGHT SQUARE BRACKET + '^' # 0x5E -> CIRCUMFLEX ACCENT + '_' # 0x5F -> LOW LINE + '`' # 0x60 -> GRAVE ACCENT + 'a' # 0x61 -> LATIN SMALL LETTER A + 'b' # 0x62 -> LATIN SMALL LETTER B + 'c' # 0x63 -> LATIN SMALL LETTER C + 'd' # 0x64 -> LATIN SMALL LETTER D + 'e' # 0x65 -> LATIN SMALL LETTER E + 'f' # 0x66 -> LATIN SMALL LETTER F + 'g' # 0x67 -> LATIN SMALL LETTER G + 'h' # 0x68 -> LATIN SMALL LETTER H + 'i' # 0x69 -> LATIN SMALL LETTER I + 'j' # 0x6A -> LATIN SMALL LETTER J + 'k' # 0x6B -> LATIN SMALL LETTER K + 'l' # 0x6C -> LATIN SMALL LETTER L + 'm' # 0x6D -> LATIN SMALL LETTER M + 'n' # 0x6E -> LATIN SMALL LETTER N + 'o' # 0x6F -> LATIN SMALL LETTER O + 'p' # 0x70 -> LATIN SMALL LETTER P + 'q' # 0x71 -> LATIN SMALL LETTER Q + 'r' # 0x72 -> LATIN SMALL LETTER R + 's' # 0x73 -> LATIN SMALL LETTER S + 't' # 0x74 -> LATIN SMALL LETTER T + 'u' # 0x75 -> LATIN SMALL LETTER U + 'v' # 0x76 -> LATIN SMALL LETTER V + 'w' # 0x77 -> LATIN SMALL LETTER W + 'x' # 0x78 -> LATIN SMALL LETTER X + 'y' # 0x79 -> LATIN SMALL LETTER Y + 'z' # 0x7A -> LATIN SMALL LETTER Z + '{' # 0x7B -> LEFT CURLY BRACKET + '|' # 0x7C -> VERTICAL LINE + '}' # 0x7D -> RIGHT CURLY BRACKET + '~' # 0x7E -> TILDE + '\x7f' # 0x7F -> DELETE + '\u20ac' # 0x80 -> EURO SIGN + '\ufffe' # 0x81 -> UNDEFINED + '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK + '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK + '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK + '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS + '\u2020' # 0x86 -> DAGGER + '\u2021' # 0x87 
-> DOUBLE DAGGER + '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT + '\u2030' # 0x89 -> PER MILLE SIGN + '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON + '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK + '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE + '\ufffe' # 0x8D -> UNDEFINED + '\ufffe' # 0x8E -> UNDEFINED + '\ufffe' # 0x8F -> UNDEFINED + '\ufffe' # 0x90 -> UNDEFINED + '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK + '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK + '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK + '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK + '\u2022' # 0x95 -> BULLET + '\u2013' # 0x96 -> EN DASH + '\u2014' # 0x97 -> EM DASH + '\u02dc' # 0x98 -> SMALL TILDE + '\u2122' # 0x99 -> TRADE MARK SIGN + '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON + '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE + '\ufffe' # 0x9D -> UNDEFINED + '\ufffe' # 0x9E -> UNDEFINED + '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS + '\xa0' # 0xA0 -> NO-BREAK SPACE + '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK + '\xa2' # 0xA2 -> CENT SIGN + '\xa3' # 0xA3 -> POUND SIGN + '\xa4' # 0xA4 -> CURRENCY SIGN + '\xa5' # 0xA5 -> YEN SIGN + '\xa6' # 0xA6 -> BROKEN BAR + '\xa7' # 0xA7 -> SECTION SIGN + '\xa8' # 0xA8 -> DIAERESIS + '\xa9' # 0xA9 -> COPYRIGHT SIGN + '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR + '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xAC -> NOT SIGN + '\xad' # 0xAD -> SOFT HYPHEN + '\xae' # 0xAE -> REGISTERED SIGN + '\xaf' # 0xAF -> MACRON + '\xb0' # 0xB0 -> DEGREE SIGN + '\xb1' # 0xB1 -> PLUS-MINUS SIGN + '\xb2' # 0xB2 -> SUPERSCRIPT TWO + '\xb3' # 0xB3 -> SUPERSCRIPT THREE + '\xb4' # 0xB4 -> ACUTE ACCENT + '\xb5' # 0xB5 -> MICRO SIGN + '\xb6' # 0xB6 -> PILCROW SIGN + '\xb7' # 0xB7 -> MIDDLE DOT + '\xb8' # 0xB8 -> CEDILLA + '\xb9' # 0xB9 -> SUPERSCRIPT ONE + '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR + '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER + '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF + '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS + '\xbf' # 0xBF -> INVERTED QUESTION MARK + '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE + '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE + '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE + '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE + '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX + '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE + '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS + '\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE + '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE + '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE + '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE + '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xd7' # 0xD7 -> MULTIPLICATION SIGN + '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE + '\xd9' # 
0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE + '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE + '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX + '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE + '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA + '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S + '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE + '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE + '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE + '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe6' # 0xE6 -> LATIN SMALL LETTER AE + '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA + '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE + '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE + '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS + '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE + '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE + '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS + '\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE + '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE + '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE + '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE + '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE + '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf7' # 0xF7 -> DIVISION SIGN + '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE + '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE + '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE + '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS + '\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I + '\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA + '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table) + +# antfs-cli distutils setup script +# +# Copyright (c) 2012, Gustav Tiger +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
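+# Illustrative sketch, separate from the generated codec module above and from +# the setup script that follows: the cp1254 module above follows the stdlib +# charmap-codec pattern, so its behaviour can be sanity-checked against +# Python's builtin 'cp1254' codec, which is built from the same mapping table +# (e.g. 0xDD <-> U+0130 and 0xFE <-> U+015F in the tables above). +import codecs + +# Round-trip a Turkish sample through the builtin cp1254 codec; the +# decoding/encoding tables generated above implement exactly this mapping. +_sample = u'\u0130stanbul \u015fehri' +_encoded = codecs.encode(_sample, 'cp1254') +assert codecs.decode(_encoded, 'cp1254') == _sample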
+ +from __future__ import absolute_import, print_function + +from setuptools import setup + +try: + with open('README.md') as file: + long_description = file.read() +except IOError: + long_description = '' + +setup(name='antfs-cli', + version='0.2', + + description='ANT-FS Command Line Interface', + long_description=long_description, + + author='Gustav Tiger', + author_email='gustav@tiger.name', + + packages=['antfs_cli'], + entry_points={ + 'console_scripts': ['antfs-cli=antfs_cli.program:main'] + }, + + url='https://github.com/Tigge/antfs-cli', + + classifiers=['Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: End Users/Desktop', + 'Intended Audience :: Healthcare Industry', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4'], + + dependency_links=['git+https://github.com/Tigge/openant.git#egg=openant-0.2'], + install_requires=['openant>=0.2'], + + test_suite='tests') + +''' +Test one delayed preaccept callback +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + +Test.Summary = ''' +Test different combinations of TLS handshake hooks to ensure they are applied consistently. 
+''' + +Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work")) + +ts = Test.MakeATSProcess("ts", select_ports=False) +server = Test.MakeOriginServer("server") +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +# desired response form the origin server +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +server.addResponse("sessionlog.json", request_header, response_header) + +ts.addSSLfile("ssl/server.pem") +ts.addSSLfile("ssl/server.key") + +ts.Variables.ssl_port = 4443 +ts.Disk.records_config.update({ + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # enable ssl port + 'proxy.config.http.server_ports': '{0}:ssl'.format(ts.Variables.ssl_port), + 'proxy.config.ssl.client.verify.server': 0, + 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', +}) + +ts.Disk.ssl_multicert_config.AddLine( + 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' +) + +ts.Disk.remap_config.AddLine( + 'map https://example.com:4443 http://127.0.0.1:{0}'.format(server.Variables.Port) +) + +Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'ssl_hook_test.cc'), ts, '-d=1') + +tr = Test.AddTestRun("Test one delayed preaccept hook") +tr.Processes.Default.StartBefore(server) +tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port)) +tr.StillRunningAfter = ts +tr.StillRunningAfter = server +tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stdout = "gold/preaccept-1.gold" + +ts.Streams.stderr = "gold/ts-preaccept-delayed-1.gold" + +preacceptstring = "Pre accept delay callback 0" +ts.Streams.All = Testers.ContainsExpression( + "\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(preacceptstring), "Pre accept message appears only once or twice", reflags=re.S | re.M) +tr.Processes.Default.TimeOut = 5 +tr.TimeOut = 5 + +# Copyright 2019 Fortinet, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
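+# Illustrative sketch, separate from the ATS test above and from the Ansible +# unit-test module that follows: the tests below patch FortiOSHandler methods +# with mocker.patch() and then assert on the recorded calls. The same pattern, +# reduced to the standard library, looks like this (the '_api' object and the +# payload used here are made-up examples, not part of the real module). +from unittest import mock + +def _push_policy(api, payload): + # Forward a policy dict to a (mocked) 'set' endpoint and return its reply. + return api.set('switch-controller.security-policy', data=payload) + +_api = mock.MagicMock() +_api.set.return_value = {'status': 'success', 'http_status': 200} +_reply = _push_policy(_api, {'name': 'default_name_3'}) +_api.set.assert_called_with('switch-controller.security-policy', data={'name': 'default_name_3'}) +assert _reply['http_status'] == 200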
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import json +import pytest +from mock import ANY +from ansible.module_utils.network.fortios.fortios import FortiOSHandler + +try: + from ansible.modules.network.fortios import fortios_switch_controller_security_policy_captive_portal +except ImportError: + pytest.skip("Could not load required modules for testing", allow_module_level=True) + + +@pytest.fixture(autouse=True) +def connection_mock(mocker): + connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_switch_controller_security_policy_captive_portal.Connection') + return connection_class_mock + + +fos_instance = FortiOSHandler(connection_mock) + + +def test_switch_controller_security_policy_captive_portal_creation(mocker): + schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') + + set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} + set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) + + input_data = { + 'username': 'admin', + 'state': 'present', + 'switch_controller_security_policy_captive_portal': { + 'name': 'default_name_3', + 'policy_type': 'captive-portal', + 'vlan': 'test_value_5' + }, + 'vdom': 'root'} + + is_error, changed, response = fortios_switch_controller_security_policy_captive_portal.fortios_switch_controller_security_policy(input_data, fos_instance) + + expected_data = { + 'name': 'default_name_3', + 'policy-type': 'captive-portal', + 'vlan': 'test_value_5' + } + + set_method_mock.assert_called_with('switch-controller.security-policy', 'captive-portal', data=expected_data, vdom='root') + schema_method_mock.assert_not_called() + assert not is_error + assert changed + assert response['status'] == 'success' + assert response['http_status'] == 200 + + +def test_switch_controller_security_policy_captive_portal_creation_fails(mocker): + schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') + + set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} + set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) + + input_data = { + 'username': 'admin', + 'state': 'present', + 'switch_controller_security_policy_captive_portal': { + 'name': 'default_name_3', + 'policy_type': 'captive-portal', + 'vlan': 'test_value_5' + }, + 'vdom': 'root'} + + is_error, changed, response = fortios_switch_controller_security_policy_captive_portal.fortios_switch_controller_security_policy(input_data, fos_instance) + + expected_data = { + 'name': 'default_name_3', + 'policy-type': 'captive-portal', + 'vlan': 'test_value_5' + } + + set_method_mock.assert_called_with('switch-controller.security-policy', 'captive-portal', data=expected_data, vdom='root') + schema_method_mock.assert_not_called() + assert is_error + assert not changed + assert response['status'] == 'error' + assert response['http_status'] == 500 + + +def test_switch_controller_security_policy_captive_portal_removal(mocker): + schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') + + delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} + delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', 
return_value=delete_method_result) + + input_data = { + 'username': 'admin', + 'state': 'absent', + 'switch_controller_security_policy_captive_portal': { + 'name': 'default_name_3', + 'policy_type': 'captive-portal', + 'vlan': 'test_value_5' + }, + 'vdom': 'root'} + + is_error, changed, response = fortios_switch_controller_security_policy_captive_portal.fortios_switch_controller_security_policy(input_data, fos_instance) + + delete_method_mock.assert_called_with('switch-controller.security-policy', 'captive-portal', mkey=ANY, vdom='root') + schema_method_mock.assert_not_called() + assert not is_error + assert changed + assert response['status'] == 'success' + assert response['http_status'] == 200 + + +def test_switch_controller_security_policy_captive_portal_deletion_fails(mocker): + schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') + + delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} + delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) + + input_data = { + 'username': 'admin', + 'state': 'absent', + 'switch_controller_security_policy_captive_portal': { + 'name': 'default_name_3', + 'policy_type': 'captive-portal', + 'vlan': 'test_value_5' + }, + 'vdom': 'root'} + + is_error, changed, response = fortios_switch_controller_security_policy_captive_portal.fortios_switch_controller_security_policy(input_data, fos_instance) + + delete_method_mock.assert_called_with('switch-controller.security-policy', 'captive-portal', mkey=ANY, vdom='root') + schema_method_mock.assert_not_called() + assert is_error + assert not changed + assert response['status'] == 'error' + assert response['http_status'] == 500 + + +def test_switch_controller_security_policy_captive_portal_idempotent(mocker): + schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') + + set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404} + set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) + + input_data = { + 'username': 'admin', + 'state': 'present', + 'switch_controller_security_policy_captive_portal': { + 'name': 'default_name_3', + 'policy_type': 'captive-portal', + 'vlan': 'test_value_5' + }, + 'vdom': 'root'} + + is_error, changed, response = fortios_switch_controller_security_policy_captive_portal.fortios_switch_controller_security_policy(input_data, fos_instance) + + expected_data = { + 'name': 'default_name_3', + 'policy-type': 'captive-portal', + 'vlan': 'test_value_5' + } + + set_method_mock.assert_called_with('switch-controller.security-policy', 'captive-portal', data=expected_data, vdom='root') + schema_method_mock.assert_not_called() + assert not is_error + assert not changed + assert response['status'] == 'error' + assert response['http_status'] == 404 + + +def test_switch_controller_security_policy_captive_portal_filter_foreign_attributes(mocker): + schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') + + set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} + set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) + + input_data = { + 'username': 'admin', + 'state': 'present', + 'switch_controller_security_policy_captive_portal': { + 
'random_attribute_not_valid': 'tag', + 'name': 'default_name_3', + 'policy_type': 'captive-portal', + 'vlan': 'test_value_5' + }, + 'vdom': 'root'} + + is_error, changed, response = fortios_switch_controller_security_policy_captive_portal.fortios_switch_controller_security_policy(input_data, fos_instance) + + expected_data = { + 'name': 'default_name_3', + 'policy-type': 'captive-portal', + 'vlan': 'test_value_5' + } + + set_method_mock.assert_called_with('switch-controller.security-policy', 'captive-portal', data=expected_data, vdom='root') + schema_method_mock.assert_not_called() + assert not is_error + assert changed + assert response['status'] == 'success' + assert response['http_status'] == 200 + +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, +) + + +class KhanAcademyIE(InfoExtractor): + _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])' + IE_NAME = 'KhanAcademy' + + _TESTS = [{ + 'url': 'http://www.khanacademy.org/video/one-time-pad', + 'md5': '7021db7f2d47d4fff89b13177cb1e8f4', + 'info_dict': { + 'id': 'one-time-pad', + 'ext': 'mp4', + 'title': 'The one-time pad', + 'description': 'The perfect cipher', + 'duration': 176, + 'uploader': 'Brit Cruise', + 'uploader_id': 'khanacademy', + 'upload_date': '20120411', + }, + 'add_ie': ['Youtube'], + }, { + 'url': 'https://www.khanacademy.org/math/applied-math/cryptography', + 'info_dict': { + 'id': 'cryptography', + 'title': 'Journey into cryptography', + 'description': 'How have humans protected their secret messages through history? What has changed today?', + }, + 'playlist_mincount': 3, + }] + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + + if m.group('key') == 'video': + data = self._download_json( + 'http://api.khanacademy.org/api/v1/videos/' + video_id, + video_id, 'Downloading video info') + + upload_date = unified_strdate(data['date_added']) + uploader = ', '.join(data['author_names']) + return { + '_type': 'url_transparent', + 'url': data['url'], + 'id': video_id, + 'title': data['title'], + 'thumbnail': data['image_url'], + 'duration': data['duration'], + 'description': data['description'], + 'uploader': uploader, + 'upload_date': upload_date, + } + else: + # topic + data = self._download_json( + 'http://api.khanacademy.org/api/v1/topic/' + video_id, + video_id, 'Downloading topic info') + + entries = [ + { + '_type': 'url', + 'url': c['url'], + 'id': c['id'], + 'title': c['title'], + } + for c in data['children'] if c['kind'] in ('Video', 'Topic')] + + return { + '_type': 'playlist', + 'id': video_id, + 'title': data['title'], + 'description': data['description'], + 'entries': entries, + } + +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes as dtypes_lib +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gradient_checker +from tensorflow.python.ops import gradients_impl +from tensorflow.python.platform import test +from tensorflow.python.platform import tf_logging + + +class MatrixDiagTest(test.TestCase): + + def testVector(self): + with self.test_session(use_gpu=True): + v = np.array([1.0, 2.0, 3.0]) + mat = np.diag(v) + v_diag = array_ops.matrix_diag(v) + self.assertEqual((3, 3), v_diag.get_shape()) + self.assertAllEqual(v_diag.eval(), mat) + + def _testBatchVector(self, dtype): + with self.test_session(use_gpu=True): + v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype) + mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]], + [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0], + [0.0, 0.0, 6.0]]]).astype(dtype) + v_batch_diag = array_ops.matrix_diag(v_batch) + self.assertEqual((2, 3, 3), v_batch_diag.get_shape()) + self.assertAllEqual(v_batch_diag.eval(), mat_batch) + + def testBatchVector(self): + self._testBatchVector(np.float32) + self._testBatchVector(np.float64) + self._testBatchVector(np.int32) + self._testBatchVector(np.int64) + self._testBatchVector(np.bool) + + def testInvalidShape(self): + with self.assertRaisesRegexp(ValueError, "must be at least rank 1"): + array_ops.matrix_diag(0) + + def testInvalidShapeAtEval(self): + with self.test_session(use_gpu=True): + v = array_ops.placeholder(dtype=dtypes_lib.float32) + with self.assertRaisesOpError("input must be at least 1-dim"): + array_ops.matrix_diag(v).eval(feed_dict={v: 0.0}) + + def testGrad(self): + shapes = ((3,), (7, 4)) + with self.test_session(use_gpu=True): + for shape in shapes: + x = constant_op.constant(np.random.rand(*shape), np.float32) + y = array_ops.matrix_diag(x) + error = gradient_checker.compute_gradient_error(x, + x.get_shape().as_list(), + y, + y.get_shape().as_list()) + self.assertLess(error, 1e-4) + + +class MatrixSetDiagTest(test.TestCase): + + def testSquare(self): + with self.test_session(use_gpu=True): + v = np.array([1.0, 2.0, 3.0]) + mat = np.array([[0.0, 1.0, 0.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 1.0]]) + mat_set_diag = np.array([[1.0, 1.0, 0.0], + [1.0, 2.0, 1.0], + [1.0, 1.0, 3.0]]) + output = array_ops.matrix_set_diag(mat, v) + self.assertEqual((3, 3), output.get_shape()) + self.assertAllEqual(mat_set_diag, output.eval()) + + def testRectangular(self): + with self.test_session(use_gpu=True): + v = np.array([3.0, 4.0]) + mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]]) + expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]]) + output = array_ops.matrix_set_diag(mat, v) + self.assertEqual((2, 3), output.get_shape()) + self.assertAllEqual(expected, output.eval()) + + v = np.array([3.0, 4.0]) + mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]) + expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]]) + output = array_ops.matrix_set_diag(mat, v) + self.assertEqual((3, 2), output.get_shape()) + self.assertAllEqual(expected, output.eval()) + + def _testSquareBatch(self, dtype): + with self.test_session(use_gpu=True): + v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype) + mat_batch = 
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]], + [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0], + [2.0, 0.0, 6.0]]]).astype(dtype) + + mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0], + [1.0, 0.0, -3.0]], + [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0], + [2.0, 0.0, -6.0]]]).astype(dtype) + + output = array_ops.matrix_set_diag(mat_batch, v_batch) + self.assertEqual((2, 3, 3), output.get_shape()) + self.assertAllEqual(mat_set_diag_batch, output.eval()) + + def testSquareBatch(self): + self._testSquareBatch(np.float32) + self._testSquareBatch(np.float64) + self._testSquareBatch(np.int32) + self._testSquareBatch(np.int64) + self._testSquareBatch(np.bool) + + def testRectangularBatch(self): + with self.test_session(use_gpu=True): + v_batch = np.array([[-1.0, -2.0], + [-4.0, -5.0]]) + mat_batch = np.array( + [[[1.0, 0.0, 3.0], + [0.0, 2.0, 0.0]], + [[4.0, 0.0, 4.0], + [0.0, 5.0, 0.0]]]) + + mat_set_diag_batch = np.array( + [[[-1.0, 0.0, 3.0], + [0.0, -2.0, 0.0]], + [[-4.0, 0.0, 4.0], + [0.0, -5.0, 0.0]]]) + output = array_ops.matrix_set_diag(mat_batch, v_batch) + self.assertEqual((2, 2, 3), output.get_shape()) + self.assertAllEqual(mat_set_diag_batch, output.eval()) + + def testInvalidShape(self): + with self.assertRaisesRegexp(ValueError, "must be at least rank 2"): + array_ops.matrix_set_diag(0, [0]) + with self.assertRaisesRegexp(ValueError, "must be at least rank 1"): + array_ops.matrix_set_diag([[0]], 0) + + def testInvalidShapeAtEval(self): + with self.test_session(use_gpu=True): + v = array_ops.placeholder(dtype=dtypes_lib.float32) + with self.assertRaisesOpError("input must be at least 2-dim"): + array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0}) + with self.assertRaisesOpError( + r"but received input shape: \[1,1\] and diagonal shape: \[\]"): + array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0}) + + def testGrad(self): + shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8)) + with self.test_session(use_gpu=True): + for shape in shapes: + x = constant_op.constant( + np.random.rand(*shape), dtype=dtypes_lib.float32) + diag_shape = shape[:-2] + (min(shape[-2:]),) + x_diag = constant_op.constant( + np.random.rand(*diag_shape), dtype=dtypes_lib.float32) + y = array_ops.matrix_set_diag(x, x_diag) + error_x = gradient_checker.compute_gradient_error( + x, x.get_shape().as_list(), y, y.get_shape().as_list()) + self.assertLess(error_x, 1e-4) + error_x_diag = gradient_checker.compute_gradient_error( + x_diag, x_diag.get_shape().as_list(), y, y.get_shape().as_list()) + self.assertLess(error_x_diag, 1e-4) + + def testGradWithNoShapeInformation(self): + with self.test_session(use_gpu=True) as sess: + v = array_ops.placeholder(dtype=dtypes_lib.float32) + mat = array_ops.placeholder(dtype=dtypes_lib.float32) + grad_input = array_ops.placeholder(dtype=dtypes_lib.float32) + output = array_ops.matrix_set_diag(mat, v) + grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input) + grad_input_val = np.random.rand(3, 3).astype(np.float32) + grad_vals = sess.run(grads, + feed_dict={ + v: 2 * np.ones(3), + mat: np.ones((3, 3)), + grad_input: grad_input_val + }) + self.assertAllEqual(np.diag(grad_input_val), grad_vals[1]) + self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)), + grad_vals[0]) + + +class MatrixDiagPartTest(test.TestCase): + + def testSquare(self): + with self.test_session(use_gpu=True): + v = np.array([1.0, 2.0, 3.0]) + mat = np.diag(v) + mat_diag = array_ops.matrix_diag_part(mat) + self.assertEqual((3,), mat_diag.get_shape()) + 
self.assertAllEqual(mat_diag.eval(), v) + + def testRectangular(self): + with self.test_session(use_gpu=True): + mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + mat_diag = array_ops.matrix_diag_part(mat) + self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0])) + mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) + mat_diag = array_ops.matrix_diag_part(mat) + self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0])) + + def _testSquareBatch(self, dtype): + with self.test_session(use_gpu=True): + v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype) + mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]], + [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0], + [0.0, 0.0, 6.0]]]).astype(dtype) + self.assertEqual(mat_batch.shape, (2, 3, 3)) + mat_batch_diag = array_ops.matrix_diag_part(mat_batch) + self.assertEqual((2, 3), mat_batch_diag.get_shape()) + self.assertAllEqual(mat_batch_diag.eval(), v_batch) + + def testSquareBatch(self): + self._testSquareBatch(np.float32) + self._testSquareBatch(np.float64) + self._testSquareBatch(np.int32) + self._testSquareBatch(np.int64) + self._testSquareBatch(np.bool) + + def testRectangularBatch(self): + with self.test_session(use_gpu=True): + v_batch = np.array([[1.0, 2.0], + [4.0, 5.0]]) + mat_batch = np.array( + [[[1.0, 0.0, 0.0], + [0.0, 2.0, 0.0]], + [[4.0, 0.0, 0.0], + [0.0, 5.0, 0.0]]]) + self.assertEqual(mat_batch.shape, (2, 2, 3)) + mat_batch_diag = array_ops.matrix_diag_part(mat_batch) + self.assertEqual((2, 2), mat_batch_diag.get_shape()) + self.assertAllEqual(mat_batch_diag.eval(), v_batch) + + def testInvalidShape(self): + with self.assertRaisesRegexp(ValueError, "must be at least rank 2"): + array_ops.matrix_diag_part(0) + + def testInvalidShapeAtEval(self): + with self.test_session(use_gpu=True): + v = array_ops.placeholder(dtype=dtypes_lib.float32) + with self.assertRaisesOpError("input must be at least 2-dim"): + array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0}) + + def testGrad(self): + shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3)) + with self.test_session(use_gpu=True): + for shape in shapes: + x = constant_op.constant(np.random.rand(*shape), dtype=np.float32) + y = array_ops.matrix_diag_part(x) + error = gradient_checker.compute_gradient_error(x, + x.get_shape().as_list(), + y, + y.get_shape().as_list()) + self.assertLess(error, 1e-4) + + +class DiagTest(test.TestCase): + + def diagOp(self, diag, dtype, expected_ans, use_gpu=False): + with self.test_session(use_gpu=use_gpu): + tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype))) + out = tf_ans.eval() + tf_ans_inv = array_ops.diag_part(expected_ans) + inv_out = tf_ans_inv.eval() + self.assertAllClose(out, expected_ans) + self.assertAllClose(inv_out, diag) + self.assertShapeEqual(expected_ans, tf_ans) + self.assertShapeEqual(diag, tf_ans_inv) + + def testEmptyTensor(self): + x = np.array([]) + expected_ans = np.empty([0, 0]) + self.diagOp(x, np.int32, expected_ans) + + def testRankOneIntTensor(self): + x = np.array([1, 2, 3]) + expected_ans = np.array( + [[1, 0, 0], + [0, 2, 0], + [0, 0, 3]]) + self.diagOp(x, np.int32, expected_ans) + self.diagOp(x, np.int64, expected_ans) + + def testRankOneFloatTensor(self): + x = np.array([1.1, 2.2, 3.3]) + expected_ans = np.array( + [[1.1, 0, 0], + [0, 2.2, 0], + [0, 0, 3.3]]) + self.diagOp(x, np.float32, expected_ans) + self.diagOp(x, np.float64, expected_ans) + + def testRankOneComplexTensor(self): + for dtype in [np.complex64, np.complex128]: + x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], 
dtype=dtype) + expected_ans = np.array( + [[1.1 + 1.1j, 0 + 0j, 0 + 0j], + [0 + 0j, 2.2 + 2.2j, 0 + 0j], + [0 + 0j, 0 + 0j, 3.3 + 3.3j]], dtype=dtype) + self.diagOp(x, dtype, expected_ans) + + def testRankTwoIntTensor(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + expected_ans = np.array( + [[[[1, 0, 0], [0, 0, 0]], + [[0, 2, 0], [0, 0, 0]], + [[0, 0, 3], [0, 0, 0]]], + [[[0, 0, 0], [4, 0, 0]], + [[0, 0, 0], [0, 5, 0]], + [[0, 0, 0], [0, 0, 6]]]]) + self.diagOp(x, np.int32, expected_ans) + self.diagOp(x, np.int64, expected_ans) + + def testRankTwoFloatTensor(self): + x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]) + expected_ans = np.array( + [[[[1.1, 0, 0], [0, 0, 0]], + [[0, 2.2, 0], [0, 0, 0]], + [[0, 0, 3.3], [0, 0, 0]]], + [[[0, 0, 0], [4.4, 0, 0]], + [[0, 0, 0], [0, 5.5, 0]], + [[0, 0, 0], [0, 0, 6.6]]]]) + self.diagOp(x, np.float32, expected_ans) + self.diagOp(x, np.float64, expected_ans) + + def testRankTwoComplexTensor(self): + for dtype in [np.complex64, np.complex128]: + x = np.array([[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], + [4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]], dtype=dtype) + expected_ans = np.array( + [[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]], + [[0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]], + [[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]], + [[[0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]], + [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]], + [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]], + dtype=dtype) + self.diagOp(x, dtype, expected_ans) + + def testRankThreeFloatTensor(self): + x = np.array([[[1.1, 2.2], [3.3, 4.4]], + [[5.5, 6.6], [7.7, 8.8]]]) + expected_ans = np.array( + [[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]], + [[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]], + [[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]], + [[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]]) + self.diagOp(x, np.float32, expected_ans) + self.diagOp(x, np.float64, expected_ans) + + def testRankThreeComplexTensor(self): + for dtype in [np.complex64, np.complex128]: + x = np.array([[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]], + [[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]], + dtype=dtype) + expected_ans = np.array( + [[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]], + [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]], + [[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]], + [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]]], + [[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]], + [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]], + [[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]], + [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]]]], + [[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], + [[5.5 + 5.5j, 0 + 0j], [0 + 0j, 0 + 0j]]], + [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], + [[0 + 0j, 6.6 + 6.6j], [0 + 0j, 0 + 0j]]]], + [[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], + [[0 + 0j, 0 + 0j], [7.7 + 7.7j, 0 + 0j]]], + [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], + [[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]], + dtype=dtype) + self.diagOp(x, dtype, expected_ans) + + +class DiagPartOpTest(test.TestCase): + + def setUp(self): + np.random.seed(0) + + def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False): + with self.test_session(use_gpu=use_gpu): + tensor = ops.convert_to_tensor(tensor.astype(dtype)) + tf_ans_inv = array_ops.diag_part(tensor) + inv_out = tf_ans_inv.eval() + self.assertAllClose(inv_out, expected_ans) + self.assertShapeEqual(expected_ans, 
tf_ans_inv) + + def testRankTwoFloatTensor(self): + x = np.random.rand(3, 3) + i = np.arange(3) + expected_ans = x[i, i] + self.diagPartOp(x, np.float32, expected_ans) + self.diagPartOp(x, np.float64, expected_ans) + + def testRankFourFloatTensorUnknownShape(self): + x = np.random.rand(3, 3) + i = np.arange(3) + expected_ans = x[i, i] + for shape in None, (None, 3), (3, None): + with self.test_session(use_gpu=False): + t = ops.convert_to_tensor(x.astype(np.float32)) + t.set_shape(shape) + tf_ans = array_ops.diag_part(t) + out = tf_ans.eval() + self.assertAllClose(out, expected_ans) + self.assertShapeEqual(expected_ans, tf_ans) + + def testRankFourFloatTensor(self): + x = np.random.rand(2, 3, 2, 3) + i = np.arange(2)[:, None] + j = np.arange(3) + expected_ans = x[i, j, i, j] + self.diagPartOp(x, np.float32, expected_ans) + self.diagPartOp(x, np.float64, expected_ans) + + def testRankSixFloatTensor(self): + x = np.random.rand(2, 2, 2, 2, 2, 2) + i = np.arange(2)[:, None, None] + j = np.arange(2)[:, None] + k = np.arange(2) + expected_ans = x[i, j, k, i, j, k] + self.diagPartOp(x, np.float32, expected_ans) + self.diagPartOp(x, np.float64, expected_ans) + + def testOddRank(self): + w = np.random.rand(2) + x = np.random.rand(2, 2, 2) + self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0) + self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0) + + def testUnevenDimensions(self): + w = np.random.rand(2, 5) + x = np.random.rand(2, 1, 2, 3) + self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0) + self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0) + + +class DiagGradOpTest(test.TestCase): + + def testDiagGrad(self): + np.random.seed(0) + shapes = ((3,), (3, 3), (3, 3, 3)) + dtypes = (dtypes_lib.float32, dtypes_lib.float64) + with self.test_session(use_gpu=False): + errors = [] + for shape in shapes: + for dtype in dtypes: + x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype) + y = array_ops.diag(x1) + error = gradient_checker.compute_gradient_error( + x1, x1.get_shape().as_list(), y, y.get_shape().as_list()) + tf_logging.info("error = %f", error) + self.assertLess(error, 1e-4) + + +class DiagGradPartOpTest(test.TestCase): + + def testDiagPartGrad(self): + np.random.seed(0) + shapes = ((3, 3), (3, 3, 3, 3)) + dtypes = (dtypes_lib.float32, dtypes_lib.float64) + with self.test_session(use_gpu=False): + errors = [] + for shape in shapes: + for dtype in dtypes: + x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype) + y = array_ops.diag_part(x1) + error = gradient_checker.compute_gradient_error( + x1, x1.get_shape().as_list(), y, y.get_shape().as_list()) + tf_logging.info("error = %f", error) + self.assertLess(error, 1e-4) + + +if __name__ == "__main__": + test.main() + +# coding: utf-8 +# +# Copyright 2014 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, softwar +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for classification of 2D coordinates.""" + +__author__ = 'Sean Lip' + +from extensions.rules import coord_two_dim +import test_utils + + +class CoordTwoDimRuleUnitTests(test_utils.GenericTestBase): + """Tests for rules operating on CoordTwoDim objects.""" + + def test_within_rule(self): + self.assertFalse(coord_two_dim.Within(10, [10, 10]).eval([0, 0])) + self.assertTrue(coord_two_dim.Within(20, [10, 10]).eval([0, 0])) + self.assertFalse(coord_two_dim.Within(10, [5, 10]).eval([-5, 0])) + self.assertTrue(coord_two_dim.Within(20, [5, 10]).eval([-5, 0])) + + def test_not_within_rule(self): + self.assertTrue(coord_two_dim.NotWithin(10, [10, 10]).eval([0, 0])) + self.assertFalse(coord_two_dim.NotWithin(20, [10, 10]).eval([0, 0])) + self.assertTrue(coord_two_dim.NotWithin(10, [5, 10]).eval([-5, 0])) + self.assertFalse(coord_two_dim.NotWithin(20, [5, 10]).eval([-5, 0])) + +import threading +from django.contrib.gis.geos.libgeos import lgeos, notice_h, error_h, CONTEXT_PTR + + +class GEOSContextHandle(object): + """ + Python object representing a GEOS context handle. + """ + def __init__(self): + # Initializing the context handler for this thread with + # the notice and error handler. + self.ptr = lgeos.initGEOS_r(notice_h, error_h) + + def __del__(self): + if self.ptr: + lgeos.finishGEOS_r(self.ptr) + + +# Defining a thread-local object and creating an instance +# to hold a reference to GEOSContextHandle for this thread. +class GEOSContext(threading.local): + handle = None + +thread_context = GEOSContext() + + +class GEOSFunc(object): + """ + Class that serves as a wrapper for GEOS C Functions, and will + use thread-safe function variants when available. + """ + def __init__(self, func_name): + try: + # GEOS thread-safe function signatures end with '_r', and + # take an additional context handle parameter. + self.cfunc = getattr(lgeos, func_name + '_r') + self.threaded = True + # Create a reference here to thread_context so it's not + # garbage-collected before an attempt to call this object. + self.thread_context = thread_context + except AttributeError: + # Otherwise, use usual function. + self.cfunc = getattr(lgeos, func_name) + self.threaded = False + + def __call__(self, *args): + if self.threaded: + # If a context handle does not exist for this thread, initialize one. + if not self.thread_context.handle: + self.thread_context.handle = GEOSContextHandle() + # Call the threaded GEOS routine with pointer of the context handle + # as the first argument. 
+ return self.cfunc(self.thread_context.handle.ptr, *args) + else: + return self.cfunc(*args) + + def __str__(self): + return self.cfunc.__name__ + + # argtypes property + def _get_argtypes(self): + return self.cfunc.argtypes + + def _set_argtypes(self, argtypes): + if self.threaded: + new_argtypes = [CONTEXT_PTR] + new_argtypes.extend(argtypes) + self.cfunc.argtypes = new_argtypes + else: + self.cfunc.argtypes = argtypes + + argtypes = property(_get_argtypes, _set_argtypes) + + # restype property + def _get_restype(self): + return self.cfunc.restype + + def _set_restype(self, restype): + self.cfunc.restype = restype + + restype = property(_get_restype, _set_restype) + + # errcheck property + def _get_errcheck(self): + return self.cfunc.errcheck + + def _set_errcheck(self, errcheck): + self.cfunc.errcheck = errcheck + + errcheck = property(_get_errcheck, _set_errcheck) + +#!/usr/bin/python + +""" +Copyright 2008 (c) Frederic Weisbecker +Licensed under the terms of the GNU GPL License version 2 + +This script parses a trace provided by the function tracer in +kernel/trace/trace_functions.c +The resulting trace is processed into a tree to produce a more +human-readable view of the call stack by drawing a textual but +hierarchical tree of calls. Only the functions' names and the +call time are provided. + +Usage: + Be sure that you have CONFIG_FUNCTION_TRACER + # mount -t debugfs nodev /sys/kernel/debug + # echo function > /sys/kernel/debug/tracing/current_tracer + $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func + Wait some time, but not too much; the script is a bit slow. + Break the pipe (Ctrl + Z) + $ scripts/draw_functrace.py < raw_trace_func > draw_functrace + Then you have your drawn trace in draw_functrace +""" + + +import sys, re + +class CallTree: + """ This class provides a tree representation of the functions + call stack. If a function has no parent in the kernel (interrupt, + syscall, kernel thread...) then it is attached to a virtual parent + called ROOT. + """ + ROOT = None + + def __init__(self, func, time = None, parent = None): + self._func = func + self._time = time + if parent is None: + self._parent = CallTree.ROOT + else: + self._parent = parent + self._children = [] + + def calls(self, func, calltime): + """ If a function calls another one, call this method to insert it + into the tree at the appropriate place. + @return: A reference to the newly created child node. + """ + child = CallTree(func, calltime, self) + self._children.append(child) + return child + + def getParent(self, func): + """ Retrieve the last parent of the current node that + has the name given by func. If this function is not + found among the parents, then create it as a new child of root. + @return: A reference to the parent.
+ """ + tree = self + while tree != CallTree.ROOT and tree._func != func: + tree = tree._parent + if tree == CallTree.ROOT: + child = CallTree.ROOT.calls(func, None) + return child + return tree + + def __repr__(self): + return self.__toString("", True) + + def __toString(self, branch, lastChild): + if self._time is not None: + s = "%s----%s (%s)\n" % (branch, self._func, self._time) + else: + s = "%s----%s\n" % (branch, self._func) + + i = 0 + if lastChild: + branch = branch[:-1] + " " + while i < len(self._children): + if i != len(self._children) - 1: + s += "%s" % self._children[i].__toString(branch +\ + " |", False) + else: + s += "%s" % self._children[i].__toString(branch +\ + " |", True) + i += 1 + return s + +class BrokenLineException(Exception): + """If the last line is not complete because of the pipe breakage, + we want to stop the processing and ignore this line. + """ + pass + +class CommentLineException(Exception): + """ If the line is a comment (as in the beginning of the trace file), + just ignore it. + """ + pass + + +def parseLine(line): + line = line.strip() + if line.startswith("#"): + raise CommentLineException + m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) + if m is None: + raise BrokenLineException + return (m.group(1), m.group(2), m.group(3)) + + +def main(): + CallTree.ROOT = CallTree("Root (Nowhere)", None, None) + tree = CallTree.ROOT + + for line in sys.stdin: + try: + calltime, callee, caller = parseLine(line) + except BrokenLineException: + break + except CommentLineException: + continue + tree = tree.getParent(caller) + tree = tree.calls(callee, calltime) + + print CallTree.ROOT + +if __name__ == "__main__": + main() + +# As a test suite for the os module, this is woefully inadequate, but this +# does add tests for a few functions which have been determined to be more +# portable than they had been thought to be. + +import os +import unittest +import warnings +import sys +from test import test_support + +warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__) +warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__) + +# Tests creating TESTFN +class FileTests(unittest.TestCase): + def setUp(self): + if os.path.exists(test_support.TESTFN): + os.unlink(test_support.TESTFN) + tearDown = setUp + + def test_access(self): + f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR) + os.close(f) + self.assert_(os.access(test_support.TESTFN, os.W_OK)) + + def test_closerange(self): + first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR) + # We must allocate two consecutive file descriptors, otherwise + # it will mess up other file descriptors (perhaps even the three + # standard ones). 
+ second = os.dup(first) + try: + retries = 0 + while second != first + 1: + os.close(first) + retries += 1 + if retries > 10: + # XXX test skipped + print >> sys.stderr, ( + "couldn't allocate two consecutive fds, " + "skipping test_closerange") + return + first, second = second, os.dup(second) + finally: + os.close(second) + # close a fd that is open, and one that isn't + os.closerange(first, first + 2) + self.assertRaises(OSError, os.write, first, "a") + + def test_rename(self): + path = unicode(test_support.TESTFN) + old = sys.getrefcount(path) + self.assertRaises(TypeError, os.rename, path, 0) + new = sys.getrefcount(path) + self.assertEqual(old, new) + + +class TemporaryFileTests(unittest.TestCase): + def setUp(self): + self.files = [] + os.mkdir(test_support.TESTFN) + + def tearDown(self): + for name in self.files: + os.unlink(name) + os.rmdir(test_support.TESTFN) + + def check_tempfile(self, name): + # make sure it doesn't already exist: + self.failIf(os.path.exists(name), + "file already exists for temporary file") + # make sure we can create the file + open(name, "w") + self.files.append(name) + + def test_tempnam(self): + if not hasattr(os, "tempnam"): + return + warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, + r"test_os$") + self.check_tempfile(os.tempnam()) + + name = os.tempnam(test_support.TESTFN) + self.check_tempfile(name) + + name = os.tempnam(test_support.TESTFN, "pfx") + self.assert_(os.path.basename(name)[:3] == "pfx") + self.check_tempfile(name) + + def test_tmpfile(self): + if not hasattr(os, "tmpfile"): + return + # As with test_tmpnam() below, the Windows implementation of tmpfile() + # attempts to create a file in the root directory of the current drive. + # On Vista and Server 2008, this test will always fail for normal users + # as writing to the root directory requires elevated privileges. With + # XP and below, the semantics of tmpfile() are the same, but the user + # running the test is more likely to have administrative privileges on + # their account already. If that's the case, then os.tmpfile() should + # work. In order to make this test as useful as possible, rather than + # trying to detect Windows versions or whether or not the user has the + # right permissions, just try and create a file in the root directory + # and see if it raises a 'Permission denied' OSError. If it does, then + # test that a subsequent call to os.tmpfile() raises the same error. If + # it doesn't, assume we're on XP or below and the user running the test + # has administrative privileges, and proceed with the test as normal. + if sys.platform == 'win32': + name = '\\python_test_os_test_tmpfile.txt' + if os.path.exists(name): + os.remove(name) + try: + fp = open(name, 'w') + except IOError, first: + # open() failed, assert tmpfile() fails in the same way. + # Although open() raises an IOError and os.tmpfile() raises an + # OSError(), 'args' will be (13, 'Permission denied') in both + # cases. + try: + fp = os.tmpfile() + except OSError, second: + self.assertEqual(first.args, second.args) + else: + self.fail("expected os.tmpfile() to raise OSError") + return + else: + # open() worked, therefore, tmpfile() should work. Close our + # dummy file and proceed with the test as normal. 
+ fp.close() + os.remove(name) + + fp = os.tmpfile() + fp.write("foobar") + fp.seek(0,0) + s = fp.read() + fp.close() + self.assert_(s == "foobar") + + def test_tmpnam(self): + import sys + if not hasattr(os, "tmpnam"): + return + warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, + r"test_os$") + name = os.tmpnam() + if sys.platform in ("win32",): + # The Windows tmpnam() seems useless. From the MS docs: + # + # The character string that tmpnam creates consists of + # the path prefix, defined by the entry P_tmpdir in the + # file STDIO.H, followed by a sequence consisting of the + # digit characters '0' through '9'; the numerical value + # of this string is in the range 1 - 65,535. Changing the + # definitions of L_tmpnam or P_tmpdir in STDIO.H does not + # change the operation of tmpnam. + # + # The really bizarre part is that, at least under MSVC6, + # P_tmpdir is "\\". That is, the path returned refers to + # the root of the current drive. That's a terrible place to + # put temp files, and, depending on privileges, the user + # may not even be able to open a file in the root directory. + self.failIf(os.path.exists(name), + "file already exists for temporary file") + else: + self.check_tempfile(name) + +# Test attributes on return values from os.*stat* family. +class StatAttributeTests(unittest.TestCase): + def setUp(self): + os.mkdir(test_support.TESTFN) + self.fname = os.path.join(test_support.TESTFN, "f1") + f = open(self.fname, 'wb') + f.write("ABC") + f.close() + + def tearDown(self): + os.unlink(self.fname) + os.rmdir(test_support.TESTFN) + + def test_stat_attributes(self): + if not hasattr(os, "stat"): + return + + import stat + result = os.stat(self.fname) + + # Make sure direct access works + self.assertEquals(result[stat.ST_SIZE], 3) + self.assertEquals(result.st_size, 3) + + import sys + + # Make sure all the attributes are there + members = dir(result) + for name in dir(stat): + if name[:3] == 'ST_': + attr = name.lower() + if name.endswith("TIME"): + def trunc(x): return int(x) + else: + def trunc(x): return x + self.assertEquals(trunc(getattr(result, attr)), + result[getattr(stat, name)]) + self.assert_(attr in members) + + try: + result[200] + self.fail("No exception thrown") + except IndexError: + pass + + # Make sure that assignment fails + try: + result.st_mode = 1 + self.fail("No exception thrown") + except TypeError: + pass + + try: + result.st_rdev = 1 + self.fail("No exception thrown") + except (AttributeError, TypeError): + pass + + try: + result.parrot = 1 + self.fail("No exception thrown") + except AttributeError: + pass + + # Use the stat_result constructor with a too-short tuple. + try: + result2 = os.stat_result((10,)) + self.fail("No exception thrown") + except TypeError: + pass + + # Use the constructr with a too-long tuple. + try: + result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)) + except TypeError: + pass + + + def test_statvfs_attributes(self): + if not hasattr(os, "statvfs"): + return + + try: + result = os.statvfs(self.fname) + except OSError, e: + # On AtheOS, glibc always returns ENOSYS + import errno + if e.errno == errno.ENOSYS: + return + + # Make sure direct access works + self.assertEquals(result.f_bfree, result[3]) + + # Make sure all the attributes are there. 
+ members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files', + 'ffree', 'favail', 'flag', 'namemax') + for value, member in enumerate(members): + self.assertEquals(getattr(result, 'f_' + member), result[value]) + + # Make sure that assignment really fails + try: + result.f_bfree = 1 + self.fail("No exception thrown") + except TypeError: + pass + + try: + result.parrot = 1 + self.fail("No exception thrown") + except AttributeError: + pass + + # Use the constructor with a too-short tuple. + try: + result2 = os.statvfs_result((10,)) + self.fail("No exception thrown") + except TypeError: + pass + + # Use the constructr with a too-long tuple. + try: + result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)) + except TypeError: + pass + + def test_utime_dir(self): + delta = 1000000 + st = os.stat(test_support.TESTFN) + # round to int, because some systems may support sub-second + # time stamps in stat, but not in utime. + os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta))) + st2 = os.stat(test_support.TESTFN) + self.assertEquals(st2.st_mtime, int(st.st_mtime-delta)) + + # Restrict test to Win32, since there is no guarantee other + # systems support centiseconds + if sys.platform == 'win32': + def get_file_system(path): + root = os.path.splitdrive(os.path.abspath(path))[0] + '\\' + import ctypes + kernel32 = ctypes.windll.kernel32 + buf = ctypes.create_string_buffer("", 100) + if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)): + return buf.value + + if get_file_system(test_support.TESTFN) == "NTFS": + def test_1565150(self): + t1 = 1159195039.25 + os.utime(self.fname, (t1, t1)) + self.assertEquals(os.stat(self.fname).st_mtime, t1) + + def test_1686475(self): + # Verify that an open file can be stat'ed + try: + os.stat(r"c:\pagefile.sys") + except WindowsError, e: + if e.errno == 2: # file does not exist; cannot run test + return + self.fail("Could not stat pagefile.sys") + +from test import mapping_tests + +class EnvironTests(mapping_tests.BasicTestMappingProtocol): + """check that os.environ object conform to mapping protocol""" + type2test = None + def _reference(self): + return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"} + def _empty_mapping(self): + os.environ.clear() + return os.environ + def setUp(self): + self.__save = dict(os.environ) + os.environ.clear() + def tearDown(self): + os.environ.clear() + os.environ.update(self.__save) + + # Bug 1110478 + def test_update2(self): + if os.path.exists("/bin/sh"): + os.environ.update(HELLO="World") + value = os.popen("/bin/sh -c 'echo $HELLO'").read().strip() + self.assertEquals(value, "World") + +class WalkTests(unittest.TestCase): + """Tests for os.walk().""" + + def test_traversal(self): + import os + from os.path import join + + # Build: + # TESTFN/ + # TEST1/ a file kid and two directory kids + # tmp1 + # SUB1/ a file kid and a directory kid + # tmp2 + # SUB11/ no kids + # SUB2/ a file kid and a dirsymlink kid + # tmp3 + # link/ a symlink to TESTFN.2 + # TEST2/ + # tmp4 a lone file + walk_path = join(test_support.TESTFN, "TEST1") + sub1_path = join(walk_path, "SUB1") + sub11_path = join(sub1_path, "SUB11") + sub2_path = join(walk_path, "SUB2") + tmp1_path = join(walk_path, "tmp1") + tmp2_path = join(sub1_path, "tmp2") + tmp3_path = join(sub2_path, "tmp3") + link_path = join(sub2_path, "link") + t2_path = join(test_support.TESTFN, "TEST2") + tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4") + + # Create stuff. 
+ os.makedirs(sub11_path) + os.makedirs(sub2_path) + os.makedirs(t2_path) + for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path: + f = file(path, "w") + f.write("I'm " + path + " and proud of it. Blame test_os.\n") + f.close() + if hasattr(os, "symlink"): + os.symlink(os.path.abspath(t2_path), link_path) + sub2_tree = (sub2_path, ["link"], ["tmp3"]) + else: + sub2_tree = (sub2_path, [], ["tmp3"]) + + # Walk top-down. + all = list(os.walk(walk_path)) + self.assertEqual(len(all), 4) + # We can't know which order SUB1 and SUB2 will appear in. + # Not flipped: TESTFN, SUB1, SUB11, SUB2 + # flipped: TESTFN, SUB2, SUB1, SUB11 + flipped = all[0][1][0] != "SUB1" + all[0][1].sort() + self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) + self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"])) + self.assertEqual(all[2 + flipped], (sub11_path, [], [])) + self.assertEqual(all[3 - 2 * flipped], sub2_tree) + + # Prune the search. + all = [] + for root, dirs, files in os.walk(walk_path): + all.append((root, dirs, files)) + # Don't descend into SUB1. + if 'SUB1' in dirs: + # Note that this also mutates the dirs we appended to all! + dirs.remove('SUB1') + self.assertEqual(len(all), 2) + self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"])) + self.assertEqual(all[1], sub2_tree) + + # Walk bottom-up. + all = list(os.walk(walk_path, topdown=False)) + self.assertEqual(len(all), 4) + # We can't know which order SUB1 and SUB2 will appear in. + # Not flipped: SUB11, SUB1, SUB2, TESTFN + # flipped: SUB2, SUB11, SUB1, TESTFN + flipped = all[3][1][0] != "SUB1" + all[3][1].sort() + self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) + self.assertEqual(all[flipped], (sub11_path, [], [])) + self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"])) + self.assertEqual(all[2 - 2 * flipped], sub2_tree) + + if hasattr(os, "symlink"): + # Walk, following symlinks. + for root, dirs, files in os.walk(walk_path, followlinks=True): + if root == link_path: + self.assertEqual(dirs, []) + self.assertEqual(files, ["tmp4"]) + break + else: + self.fail("Didn't follow symlink with followlinks=True") + + def tearDown(self): + # Tear everything down. This is a decent use for bottom-up on + # Windows, which doesn't have a recursive delete command. The + # (not so) subtlety is that rmdir will fail unless the dir's + # kids are removed first, so bottom up is essential. + for root, dirs, files in os.walk(test_support.TESTFN, topdown=False): + for name in files: + os.remove(os.path.join(root, name)) + for name in dirs: + dirname = os.path.join(root, name) + if not os.path.islink(dirname): + os.rmdir(dirname) + else: + os.remove(dirname) + os.rmdir(test_support.TESTFN) + +class MakedirTests (unittest.TestCase): + def setUp(self): + os.mkdir(test_support.TESTFN) + + def test_makedir(self): + base = test_support.TESTFN + path = os.path.join(base, 'dir1', 'dir2', 'dir3') + os.makedirs(path) # Should work + path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4') + os.makedirs(path) + + # Try paths with a '.' 
in them + self.failUnlessRaises(OSError, os.makedirs, os.curdir) + path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir) + os.makedirs(path) + path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4', + 'dir5', 'dir6') + os.makedirs(path) + + + + + def tearDown(self): + path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3', + 'dir4', 'dir5', 'dir6') + # If the tests failed, the bottom-most directory ('../dir6') + # may not have been created, so we look for the outermost directory + # that exists. + while not os.path.exists(path) and path != test_support.TESTFN: + path = os.path.dirname(path) + + os.removedirs(path) + +class DevNullTests (unittest.TestCase): + def test_devnull(self): + f = file(os.devnull, 'w') + f.write('hello') + f.close() + f = file(os.devnull, 'r') + self.assertEqual(f.read(), '') + f.close() + +class URandomTests (unittest.TestCase): + def test_urandom(self): + try: + self.assertEqual(len(os.urandom(1)), 1) + self.assertEqual(len(os.urandom(10)), 10) + self.assertEqual(len(os.urandom(100)), 100) + self.assertEqual(len(os.urandom(1000)), 1000) + # see http://bugs.python.org/issue3708 + self.assertEqual(len(os.urandom(0.9)), 0) + self.assertEqual(len(os.urandom(1.1)), 1) + self.assertEqual(len(os.urandom(2.0)), 2) + except NotImplementedError: + pass + +class Win32ErrorTests(unittest.TestCase): + def test_rename(self): + self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak") + + def test_remove(self): + self.assertRaises(WindowsError, os.remove, test_support.TESTFN) + + def test_chdir(self): + self.assertRaises(WindowsError, os.chdir, test_support.TESTFN) + + def test_mkdir(self): + self.assertRaises(WindowsError, os.chdir, test_support.TESTFN) + + def test_utime(self): + self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None) + + def test_access(self): + self.assertRaises(WindowsError, os.utime, test_support.TESTFN, 0) + + def test_chmod(self): + self.assertRaises(WindowsError, os.utime, test_support.TESTFN, 0) + +class TestInvalidFD(unittest.TestCase): + singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat", + "fstatvfs", "fsync", "tcgetpgrp", "ttyname"] + #singles.append("close") + #We omit close because it doesn'r raise an exception on some platforms + def get_single(f): + def helper(self): + if hasattr(os, f): + self.check(getattr(os, f)) + return helper + for f in singles: + locals()["test_"+f] = get_single(f) + + def check(self, f, *args): + self.assertRaises(OSError, f, test_support.make_bad_fd(), *args) + + def test_isatty(self): + if hasattr(os, "isatty"): + self.assertEqual(os.isatty(test_support.make_bad_fd()), False) + + def test_closerange(self): + if hasattr(os, "closerange"): + fd = test_support.make_bad_fd() + self.assertEqual(os.closerange(fd, fd + 10), None) + + def test_dup2(self): + if hasattr(os, "dup2"): + self.check(os.dup2, 20) + + def test_fchmod(self): + if hasattr(os, "fchmod"): + self.check(os.fchmod, 0) + + def test_fchown(self): + if hasattr(os, "fchown"): + self.check(os.fchown, -1, -1) + + def test_fpathconf(self): + if hasattr(os, "fpathconf"): + self.check(os.fpathconf, "PC_NAME_MAX") + + #this is a weird one, it raises IOError unlike the others + def test_ftruncate(self): + if hasattr(os, "ftruncate"): + self.assertRaises(IOError, os.ftruncate, test_support.make_bad_fd(), + 0) + + def test_lseek(self): + if hasattr(os, "lseek"): + self.check(os.lseek, 0, 0) + + def test_read(self): + if hasattr(os, "read"): + self.check(os.read, 1) + + 
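+    # A note on the generated tests above: because the class body is still
+    # executing when the `for f in singles:` loop runs, assigning into
+    # locals() adds one test_<name> method per entry in `singles` (CPython
+    # permits this for class namespaces).  Each generated method is roughly
+    # equivalent to writing by hand:
+    #
+    #     def test_fstat(self):
+    #         if hasattr(os, "fstat"):
+    #             self.check(os.fstat)
+    #
+    # i.e. it calls check() with a deliberately bad file descriptor and
+    # expects OSError.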
def test_tcsetpgrpt(self): + if hasattr(os, "tcsetpgrp"): + self.check(os.tcsetpgrp, 0) + + def test_write(self): + if hasattr(os, "write"): + self.check(os.write, " ") + +if sys.platform != 'win32': + class Win32ErrorTests(unittest.TestCase): + pass + + class PosixUidGidTests(unittest.TestCase): + if hasattr(os, 'setuid'): + def test_setuid(self): + if os.getuid() != 0: + self.assertRaises(os.error, os.setuid, 0) + self.assertRaises(OverflowError, os.setuid, 1<<32) + + if hasattr(os, 'setgid'): + def test_setgid(self): + if os.getuid() != 0: + self.assertRaises(os.error, os.setgid, 0) + self.assertRaises(OverflowError, os.setgid, 1<<32) + + if hasattr(os, 'seteuid'): + def test_seteuid(self): + if os.getuid() != 0: + self.assertRaises(os.error, os.seteuid, 0) + self.assertRaises(OverflowError, os.seteuid, 1<<32) + + if hasattr(os, 'setegid'): + def test_setegid(self): + if os.getuid() != 0: + self.assertRaises(os.error, os.setegid, 0) + self.assertRaises(OverflowError, os.setegid, 1<<32) + + if hasattr(os, 'setreuid'): + def test_setreuid(self): + if os.getuid() != 0: + self.assertRaises(os.error, os.setreuid, 0, 0) + self.assertRaises(OverflowError, os.setreuid, 1<<32, 0) + self.assertRaises(OverflowError, os.setreuid, 0, 1<<32) + + if hasattr(os, 'setregid'): + def test_setregid(self): + if os.getuid() != 0: + self.assertRaises(os.error, os.setregid, 0, 0) + self.assertRaises(OverflowError, os.setregid, 1<<32, 0) + self.assertRaises(OverflowError, os.setregid, 0, 1<<32) +else: + class PosixUidGidTests(unittest.TestCase): + pass + +def test_main(): + test_support.run_unittest( + FileTests, + TemporaryFileTests, + StatAttributeTests, + EnvironTests, + WalkTests, + MakedirTests, + DevNullTests, + URandomTests, + Win32ErrorTests, + TestInvalidFD, + PosixUidGidTests + ) + +if __name__ == "__main__": + test_main() + +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +TARGETS = [ + 'AjaxTest.py', +] + + +PACKAGE = { + 'title': 'Ajaxlib Test', + 'desc': 'Experimental Dynamic Module loading of a Javascript Module', +} + + +def setup(targets): + '''Setup example for translation, MUST call util.setup(targets).''' + util.setup(targets) + + +def translate(): + '''Translate example, MUST call util.translate().''' + util.translate() + + +def install(package): + '''Install and cleanup example module. 
MUST call util.install(package)''' + util.install(package) + + +##---------------------------------------## +# --------- (-: DO NOT EDIT :-) --------- # +##---------------------------------------## + + +import sys +import os + + +examples = head = os.path.abspath(os.path.dirname(__file__)) +while os.path.split(examples)[1].lower() != 'examples': + examples = os.path.split(examples)[0] + if not examples: + raise ValueError("Cannot determine examples directory") +sys.path.insert(0, os.path.join(examples)) +from _examples import util +sys.path.pop(0) + +util.init(head) + +setup(TARGETS) +translate() +install(PACKAGE) + +""" PingThread.py + +Thread that pings the target server """ + +# Imports +import NetworkMonitor +import re +import ThreadBase +import time +import subprocess + +class PingThread(ThreadBase.ThreadBase): + def __init__(self, platform, targetServer, interval, logFile, outputFile): + """Constructor""" + # Initialize variables + super(PingThread, self).__init__("Ping", interval, logFile, outputFile) + self.platform = platform + self.targetServer = targetServer + self.outputFile.SetFileHeader("Packets sent\tPackets received\tMinimum (ms)\tAverage (ms)\tMaximum (ms)\tStdev (ms)\tTotal time(s)") + if self.platform == NetworkMonitor.PLATFORM_LINUX: + # Ping output lines of interest look like the following on Ubuntu: + # 4 packets transmitted, 4 received, 0% packet loss, time 3004ms + # rtt min/avg/max/mdev = 47.014/51.046/62.049/6.368 ms + self.command = "ping -c 4 -q %s" % self.targetServer + self.regEx1 = re.compile("([0-9]+) packets transmitted, ([0-9]+) received, ([0-9.]+)% packet loss, time ([0-9]+)ms") + self.regEx1Groups = 4 + self.regEx2 = re.compile("rtt min/avg/max/mdev = ([0-9.]+)/([0-9.]+)/([0-9.]+)/([0-9.]+) ms") + self.regEx2Groups = 4 + elif self.platform == NetworkMonitor.PLATFORM_MACOS: + # Ping output lines of interest look like the following on Mac OS: + # 4 packets transmitted, 4 packets received, 0.0% packet loss + # round-trip min/avg/max/stddev = 47.399/48.315/50.227/1.117 ms + self.command = "ping -c 4 -q %s" % self.targetServer + self.regEx1 = re.compile("([0-9]+) packets transmitted, ([0-9]+) packets received, ([0-9.]+)% packet loss") + self.regEx1Groups = 3 + self.regEx2 = re.compile("round-trip min/avg/max/stddev = ([0-9.]+)/([0-9.]+)/([0-9.]+)/([0-9.]+) ms") + self.regEx2Groups = 4 + elif self.platform == NetworkMonitor.PLATFORM_WINDOWS: + # Ping output lines of interest look like the following on Windows: + # Packets: Sent = 4, Received = 4, Lost = 0 (0% loss), + # Minimum = 45ms, Maximum = 58ms, Average = 49ms + self.command = "ping -n 4 %s" % self.targetServer + self.regEx1 = re.compile("Packets: Sent = ([0-9]+), Received = ([0-9]+), Lost = ([0-9]+) \(([0-9.]+)% loss\),") + self.regEx1Groups = 4 + self.regEx2 = re.compile("Minimum = ([0-9.]+)ms, Maximum = ([0-9.]+)ms, Average = ([0-9.]+)ms") + self.regEx2Groups = 3 + else: + raise Exception("Unknown platform: " + self.platform) + + def PingServer(self): + """Pings the server four times and returns statistics""" + # Create the process and read in the output lines + proc = subprocess.Popen(self.command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + lines = proc.stdout.readlines() + + # Windows requires each line to be decoded + if self.platform == NetworkMonitor.PLATFORM_WINDOWS: + newLines = [] + for line in lines: + newLines.append(line.decode("utf-8")) + lines = newLines + + # Wait until the process completes and parse the output + proc.wait() + packetsTransmitted = -1 + 
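+        # Parsing note: the counters here start at -1 as "not parsed" sentinels.
+        # regEx1 supplies the packet counts and regEx2 the timing figures; on
+        # Linux/macOS regEx2 yields min/avg/max(/stdev) in that order, while the
+        # Windows line is Minimum/Maximum/Average with no deviation value, which
+        # is why the branch below swaps groups 2 and 3 and leaves stdev at its
+        # -1 sentinel on that platform.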
packetsReceived = -1 + min = -1 + avg = -1 + max = -1 + stdev = -1 + for line in lines: + strippedLine = line.strip() + result = self.regEx1.match(strippedLine) + if (result is not None) and (result.lastindex >= self.regEx1Groups): + packetsTransmitted = int(result.group(1)) + packetsReceived = int(result.group(2)) + result = self.regEx2.match(strippedLine) + if (result is not None) and (result.lastindex >= self.regEx2Groups): + min = float(result.group(1)) + if self.platform != NetworkMonitor.PLATFORM_WINDOWS: + avg = float(result.group(2)) + max = float(result.group(3)) + else: + max = float(result.group(2)) + avg = float(result.group(3)) + if self.regEx2Groups == 4: + stdev = float(result.group(4)) + if packetsTransmitted == -1: + # Failed to parse the output + if proc.returncode != 0: + error = "Ping command failed with the following output:\n" + else: + error = "Failed to parse ping output:\n" + for line in lines: + error += line + self.logFile.Write(error) + return (packetsTransmitted, packetsReceived, min, avg, max, stdev) + + def Capture(self): + """Pings the server and writes the statistics as the next data point""" + # Gather ping data + startTime = time.time() + pingPacketsTransmitted, pingPacketsReceived, pingMin, pingAverage, pingMax, pingStdev = self.PingServer() + elapsedTime = time.time() - startTime + + # Write out the data point + self.outputFile.Write("%i\t%i\t%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.2f" % (pingPacketsTransmitted, pingPacketsReceived, pingMin, pingAverage, pingMax, pingStdev, elapsedTime)) + +""" +Support for Pilight sensors. + +For more details about this platform, please refer to the documentation at +https://home-assistant.io/components/sensor.pilight/ +""" +import logging + +import voluptuous as vol + +from homeassistant.const import ( + CONF_NAME, STATE_UNKNOWN, CONF_UNIT_OF_MEASUREMENT, CONF_PAYLOAD) +from homeassistant.components.sensor import PLATFORM_SCHEMA +from homeassistant.helpers.entity import Entity +import homeassistant.components.pilight as pilight +import homeassistant.helpers.config_validation as cv + +_LOGGER = logging.getLogger(__name__) + +CONF_VARIABLE = 'variable' + +DEFAULT_NAME = 'Pilight Sensor' +DEPENDENCIES = ['pilight'] + +PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ + vol.Required(CONF_VARIABLE): cv.string, + vol.Required(CONF_PAYLOAD): vol.Schema(dict), + vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, + vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=None): cv.string, +}) + + +# pylint: disable=unused-argument +def setup_platform(hass, config, add_devices, discovery_info=None): + """Set up Pilight Sensor.""" + add_devices([PilightSensor( + hass=hass, + name=config.get(CONF_NAME), + variable=config.get(CONF_VARIABLE), + payload=config.get(CONF_PAYLOAD), + unit_of_measurement=config.get(CONF_UNIT_OF_MEASUREMENT) + )]) + + +class PilightSensor(Entity): + """Representation of a sensor that can be updated using Pilight.""" + + def __init__(self, hass, name, variable, payload, unit_of_measurement): + """Initialize the sensor.""" + self._state = STATE_UNKNOWN + self._hass = hass + self._name = name + self._variable = variable + self._payload = payload + self._unit_of_measurement = unit_of_measurement + + hass.bus.listen(pilight.EVENT, self._handle_code) + + @property + def should_poll(self): + """No polling needed.""" + return False + + @property + def name(self): + """Return the name of the sensor.""" + return self._name + + @property + def unit_of_measurement(self): + """Return the unit this state is expressed in.""" + return 
self._unit_of_measurement + + @property + def state(self): + """Return the state of the entity.""" + return self._state + + def _handle_code(self, call): + """Handle received code by the pilight-daemon. + + If the code matches the defined playload + of this sensor the sensor state is changed accordingly. + """ + # Check if received code matches defined playoad + # True if payload is contained in received code dict, not + # all items have to match + if self._payload.items() <= call.data.items(): + try: + value = call.data[self._variable] + self._state = value + self.update_ha_state() + except KeyError: + _LOGGER.error( + 'No variable %s in received code data %s', + str(self._variable), str(call.data)) + +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. + +""" +from __future__ import division, absolute_import, print_function + +import operator as op +from numbers import Number + +import numpy as np +from numpy.polynomial import ( + Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite) +from numpy.compat import long + + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE) + + +def test_class_methods(): + for Poly1 in classes: + for Poly2 in classes: + yield check_conversion, Poly1, Poly2 + yield check_cast, Poly1, Poly2 + for Poly in classes: + yield check_call, Poly + yield check_identity, Poly + yield check_basis, Poly + yield check_fromroots, Poly + yield check_fit, Poly + yield check_equal, Poly + yield check_not_equal, Poly + yield check_add, Poly + yield check_sub, Poly + yield check_mul, Poly + yield check_floordiv, Poly + yield check_truediv, Poly + yield check_mod, Poly + yield check_divmod, Poly + yield check_pow, Poly + yield check_integ, Poly + yield check_deriv, Poly + yield check_roots, Poly + yield check_linspace, Poly + yield check_mapparms, Poly + yield check_degree, Poly + yield check_copy, Poly + yield check_cutdeg, Poly + yield check_truncate, Poly + yield check_trim, Poly + + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = "Result: %s\nTarget: %s", (p1, p2) + raise AssertionError(msg) + + +# +# conversion methods that depend on two classes +# + + +def check_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def check_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# methods that depend on one class +# + + +def check_identity(Poly): + d = Poly.domain + 
random((2,))*.25 + w = Poly.window + random((2,))*.25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def check_basis(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0]*5 + [1]) + + +def check_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def check_fit(Poly): + + def f(x): + return x*(x - 1)*(x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. 
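+    # Rationale for the check below: a zero weight effectively drops a sample
+    # from the least-squares problem, so fitting every point with weights
+    # [1, 0, 1, 0, ...] should give (nearly) the same polynomial as fitting
+    # only the even-indexed points directly.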
+ w = np.zeros_like(x) + z = y + random(y.shape)*.25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def check_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def check_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def check_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def check_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def check_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def check_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = 
list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def check_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. + p1 = Poly([1,2,3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, long, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [tuple(), list(), dict(), bool(), np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def check_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def check_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, 
p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def check_roots(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + tgt = np.sort(random((5,))) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + assert_almost_equal(res, tgt) + + +def check_degree(Poly): + p = Poly.basis(5) + assert_equal(p.degree(), 5) + + +def check_copy(Poly): + p1 = Poly.basis(5) + p2 = p1.copy() + assert_(p1 == p2) + assert_(p1 is not p2) + assert_(p1.coef is not p2.coef) + assert_(p1.domain is not p2.domain) + assert_(p1.window is not p2.window) + + +def check_integ(Poly): + P = Polynomial + # Check defaults + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + # Check with k + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ(k=1)) + p2 = P.cast(p0.integ(2, k=[1, 1])) + assert_poly_almost_equal(p1, P([1, 2, 3, 4])) + assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) + # Check with lbnd + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ(lbnd=1)) + p2 = P.cast(p0.integ(2, lbnd=1)) + assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) + assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) + # Check scaling + d = 2*Poly.domain + p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + + +def check_deriv(Poly): + # Check that the derivative is the inverse of integration. It is + # assumes that the integration has been checked elsewhere. 
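+    # Concretely: with p1 = Poly([1, 2, 3]), p1.integ(2, k=[1, 2]).deriv(1)
+    # should match the single integration p1.integ(1, k=[1]), and .deriv(2)
+    # should recover p1's coefficients; both identities are asserted below for
+    # a random domain/window and again for the defaults.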
+ d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p1 = Poly([1, 2, 3], domain=d, window=w) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + + +def check_linspace(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly([1, 2, 3], domain=d, window=w) + # check default domain + xtgt = np.linspace(d[0], d[1], 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + # check specified domain + xtgt = np.linspace(0, 2, 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20, domain=[0, 2]) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + + +def check_pow(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = Poly([1]) + tst = Poly([1, 2, 3]) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # check error for invalid powers + assert_raises(ValueError, op.pow, tgt, 1.5) + assert_raises(ValueError, op.pow, tgt, -1) + + +def check_call(Poly): + P = Polynomial + d = Poly.domain + x = np.linspace(d[0], d[1], 11) + + # Check defaults + p = Poly.cast(P([1, 2, 3])) + tgt = 1 + x*(2 + 3*x) + res = p(x) + assert_almost_equal(res, tgt) + + +def check_cutdeg(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.cutdeg, .5) + assert_raises(ValueError, p.cutdeg, -1) + assert_equal(len(p.cutdeg(3)), 3) + assert_equal(len(p.cutdeg(2)), 3) + assert_equal(len(p.cutdeg(1)), 2) + assert_equal(len(p.cutdeg(0)), 1) + + +def check_truncate(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def check_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def check_mapparms(Poly): + # check with defaults. Should be identity. 
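+    # mapparms() returns the (offset, scale) pair of the affine map that sends
+    # the domain onto the window, i.e. x -> offset + scale*x.  With domain ==
+    # window the map is the identity, hence (0, 1); with window = 2*domain + 1
+    # it is x -> 1 + 2*x, hence the (1, 2) asserted below.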
+ d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2*d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +if __name__ == "__main__": + run_module_suite() + +import grokcore.component +import z3c.traverser.interfaces +import zeit.cms.workingcopy.interfaces +import zope.app.container.btree +import zope.app.security.interfaces +import zope.component +import zope.dublincore.interfaces +import zope.interface +import zope.publisher.interfaces +import zope.security.interfaces +import zope.security.management +import zope.securitypolicy.interfaces + + +class Workingcopy(zope.app.container.btree.BTreeContainer): + """The working copy is the area of the CMS where users edit content.""" + + zope.interface.implements(zeit.cms.workingcopy.interfaces.IWorkingcopy) + _order = () + temporary = False # avoid migration of existing objects + + def __init__(self, temporary=False): + super(Workingcopy, self).__init__() + self.temporary = temporary + + def __iter__(self): + for key in reversed(self._order): + yield key + for key in super(Workingcopy, self).__iter__(): + if key in self._order: + continue + yield key + + def values(self): + for key in self: + yield self[key] + + def __setitem__(self, key, item): + if not zeit.cms.workingcopy.interfaces.ILocalContent.providedBy(item): + raise ValueError("Must provide ILocalContent") + super(Workingcopy, self).__setitem__(key, item) + self._order += (key, ) + + def __delitem__(self, key): + super(Workingcopy, self).__delitem__(key) + order = list(self._order) + try: + order.remove(key) + except ValueError: + pass + else: + self._order = tuple(order) + + +class WorkingcopyLocation(zope.app.container.btree.BTreeContainer): + """Location for working copies of all users.""" + + zope.interface.implements( + zeit.cms.workingcopy.interfaces.IWorkingcopyLocation) + + def getWorkingcopy(self): + """Get the working copy for the currently logged in user.""" + principal = self._get_principal() + return self.getWorkingcopyFor(principal) + + def getWorkingcopyFor(self, principal): + principal_id = principal.id + try: + result = self[principal_id] + except KeyError: + # User doesn't have a working copy yet, create one + result = self[principal_id] = Workingcopy() + perms = ( + zope.securitypolicy.interfaces.IPrincipalPermissionManager( + result)) + perms.grantPermissionToPrincipal('zeit.EditContent', principal_id) + + prm = zope.securitypolicy.interfaces.IPrincipalRoleManager( + result) + prm.assignRoleToPrincipal('zeit.Owner', principal_id) + + try: + dc = zope.dublincore.interfaces.IDCDescriptiveProperties( + result) + except TypeError: + pass + else: + if principal.title: + dc.title = principal.title + if principal.description: + dc.description = principal.description + return result + + def _get_principal(self): + # Find the current principal. Note that it is possible for there + # to be more than one principal - in this case we throw an error. 
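+        # Each participation registered with the current security interaction
+        # carries a principal (typically the authenticated user of the request
+        # being processed).  This helper requires exactly one of them: a second
+        # participant, or an interaction with none at all, raises ValueError.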
+ interaction = zope.security.management.getInteraction() + principal = None + for p in interaction.participations: + if principal is None: + principal = p.principal + else: + raise ValueError("Multiple principals found") + if principal is None: + raise ValueError("No principal found") + return principal + + +@zope.component.adapter(zope.security.interfaces.IPrincipal) +@zope.interface.implementer(zeit.cms.workingcopy.interfaces.IWorkingcopy) +def principalAdapter(context): + location = zope.component.getUtility( + zeit.cms.workingcopy.interfaces.IWorkingcopyLocation) + return location.getWorkingcopyFor(context) + + +@grokcore.component.adapter(None) +@grokcore.component.implementer(zeit.cms.workingcopy.interfaces.IWorkingcopy) +def workingcopy_for_current_principal(ignored): + # Find the current principal. Note that it is possible for there + # to be more than one principal - in this case adapting fails + try: + interaction = zope.security.management.getInteraction() + except zope.security.interfaces.NoInteraction: + return + principal = None + for p in interaction.participations: + if principal is None: + principal = p.principal + else: + return + if principal is None: + return + return zeit.cms.workingcopy.interfaces.IWorkingcopy(principal, None) + + +class WorkingcopyTraverser(object): + """Traverses to working copies, creating them on the fly.""" + + zope.interface.implements(z3c.traverser.interfaces.IPluggableTraverser) + + def __init__(self, context, request): + self.context = context + self.request = request + + def publishTraverse(self, request, name): + auth = zope.component.getUtility( + zope.app.security.interfaces.IAuthentication) + try: + principal = auth.getPrincipal(name) + except zope.app.security.interfaces.PrincipalLookupError: + raise zope.publisher.interfaces.NotFound( + self.context, name, request) + return zeit.cms.workingcopy.interfaces.IWorkingcopy(principal) + +import os +import sys +import shutil +import string +import subprocess +import re +from settings.settingsLoader import OXA_FILEHD_CACHE_VMS,OXA_FILEHD_REMOTE_VMS,OXA_FILEHD_CACHE_TEMPLATES,OXA_FILEHD_REMOTE_TEMPLATES,OXA_FILEHD_USE_CACHE,OXA_FILEHD_NICE_PRIORITY,OXA_FILEHD_CREATE_SPARSE_DISK,OXA_FILEHD_IONICE_CLASS, OXA_FILEHD_IONICE_PRIORITY,OXA_FILEHD_DD_BS_KB, OXA_DEFAULT_SWAP_SIZE_MB +from utils.AgentExceptions import * +from utils.Logger import Logger + +''' + @author: msune + + File-type Hd management routines +''' + +OXA_FILEHD_HD_TMP_MP="/tmp/oxa/hd" + +class FileHdManager(object): + ''' + File-type Hard Disk management routines + ''' + + logger = Logger.getLogger() + + #Enables/disables the usage of Cache directory + __useCache=OXA_FILEHD_USE_CACHE + + ##Utils + @staticmethod + def subprocessCall(command, priority=OXA_FILEHD_NICE_PRIORITY, ioPriority=OXA_FILEHD_IONICE_PRIORITY, ioClass=OXA_FILEHD_IONICE_CLASS, stdout=None): + try: + wrappedCmd = "/usr/bin/nice -n "+str(priority)+" /usr/bin/ionice -c "+str(ioClass)+" -n "+str(ioPriority)+" "+command + FileHdManager.logger.debug("Executing: "+wrappedCmd) + subprocess.check_call(wrappedCmd, shell=True, stdout=stdout) + except Exception as e: + FileHdManager.logger.error("Unable to execute command: "+command) + raise e + + + + + #Debug string + @staticmethod + def debugVM(vm): + return " project:"+vm.project_id+", slice:"+vm.slice_id+", name:"+vm.name + + + #Paths + ''' Returns the container directory for the VM in remote FS''' + @staticmethod + def getRemoteHdDirectory(vm): + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/" + + ''' 
Returns the container directory for the VM in remote Cache, if used''' + @staticmethod + def getHdDirectory(vm): + if FileHdManager.__useCache: + return OXA_FILEHD_CACHE_VMS+vm.project_id+"/"+vm.slice_id+"/" + else: + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/" + + ''' Returns the path of the hd file in Cache, if used''' + @staticmethod + def getHdPath(vm): + if FileHdManager.__useCache: + return OXA_FILEHD_CACHE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+".img" + else: + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+".img" + + ''' Returns the path of the hd file in Remote''' + @staticmethod + def getRemoteHdPath(vm): + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+".img" + + ''' Returns the path of the swap hd file in Cache, if used''' + @staticmethod + def getSwapPath(vm): + if FileHdManager.__useCache: + return OXA_FILEHD_CACHE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+"_swap"+".img" + else: + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+"_swap"+".img" + + ''' Returns the path of the swap hd file in Remote''' + @staticmethod + def getRemoteSwapPath(vm): + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+"_swap"+".img" + + ''' Returns the path of the config file in Cache, if used''' + @staticmethod + def getConfigFilePath(vm): + if FileHdManager.__useCache: + return OXA_FILEHD_CACHE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+".conf" + else: + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+".conf" + + ''' Returns the path of the config file in Remote''' + @staticmethod + def getRemoteConfigFilePath(vm): + return OXA_FILEHD_REMOTE_VMS+vm.project_id+"/"+vm.slice_id+"/"+vm.name+".conf" + + ''' Returns the path of the temporally mounted Hd in the dom0 filesystem''' + @staticmethod + def getTmpMountedHdPath(vm): + return OXA_FILEHD_HD_TMP_MP+vm.name+"_"+vm.uuid+"/" + + ''' Returns the path of the templates origin''' + @staticmethod + def getTemplatesPath(vm): + if FileHdManager.__useCache: + return OXA_FILEHD_CACHE_TEMPLATES + else: + return OXA_FILEHD_REMOTE_TEMPLATES + + + + ##Hooks + '''Pre-start Hook''' + @staticmethod + def startHook(vm): + if not FileHdManager.isVMinCacheFS(vm): + FileHdManager.moveVMToCacheFS(vm) + + '''Pre-reboot Hook''' + @staticmethod + def rebootHook(vm): + return + + '''Post-stop Hook''' + @staticmethod + def stopHook(vm): + if FileHdManager.isVMinCacheFS(vm): + FileHdManager.moveVMToRemoteFS(vm) + + + + ##Hd management routines + + @staticmethod + def __fileTemplateExistsOrImportFromRemote(filepath): + + #if Cache is not used skip + if not FileHdManager.__useCache: + return True + + #Check cache + if os.path.exists(OXA_FILEHD_CACHE_TEMPLATES+filepath): + return True + path = os.path.dirname(filepath) + + #Check remote + if os.path.exists(OXA_FILEHD_REMOTE_TEMPLATES+path): + #import from remote to cache + FileHdManager.logger.info("Importing image to cache directory:"+OXA_FILEHD_REMOTE_TEMPLATES+path+"->"+OXA_FILEHD_CACHE_TEMPLATES+path) + try: + #Copy all + FileHdManager.subprocessCall("/bin/cp "+ str(OXA_FILEHD_REMOTE_TEMPLATES+path)+" "+str(OXA_FILEHD_CACHE_TEMPLATES+path)) + except Exception as e: + return False + return True + + return False + + @staticmethod + def clone(vm): + + ##Check file existance in CACHE + #FileHdManager.logger.debug("Checking:"+FileHdManager.getHdPath(vm)) + if os.path.exists(FileHdManager.getHdPath(vm)): + raise VMalreadyExists("Another VM with the same name exists in the same project 
and slice:"+FileHdManager.debugVM(vm)) + + #FileHdManager.logger.debug("Checking:"+FileHdManager.getRemoteHdPath(vm)) + ##Check file existance in REMOTE + if os.path.exists(FileHdManager.getRemoteHdPath(vm)): + raise VMalreadyExists("Another VM with the same name exists in the same project and slice:"+FileHdManager.debugVM(vm)) + + if FileHdManager.__fileTemplateExistsOrImportFromRemote(vm.xen_configuration.hd_origin_path): + path= "" + try: + #TODO: user authentication + template_path=FileHdManager.getTemplatesPath(vm)+vm.xen_configuration.hd_origin_path + template_swap_path=FileHdManager.getTemplatesPath(vm)+vm.xen_configuration.hd_origin_path+"_swap" + vm_path=FileHdManager.getHdPath(vm) + swap_path=FileHdManager.getSwapPath(vm) + + FileHdManager.logger.debug("Trying to clone from:"+template_path+"->>"+vm_path) + + if not os.path.exists(os.path.dirname(vm_path)): + os.makedirs(os.path.dirname(vm_path)) + + count = (vm.xen_configuration.hd_size_mb*1024)/OXA_FILEHD_DD_BS_KB + if (vm.xen_configuration.hd_size_mb*1024)/OXA_FILEHD_DD_BS_KB > 0: + FileHdManager.logger.warning("HD size will be normalized") + count =int(count) + + #Create HD + FileHdManager.logger.info("Creating disks...") + if OXA_FILEHD_CREATE_SPARSE_DISK: + FileHdManager.logger.info("Main disk will be created as Sparse disk...") + FileHdManager.subprocessCall("/bin/dd if=/dev/zero of="+str(vm_path)+" bs="+str(OXA_FILEHD_DD_BS_KB)+"k count=1 seek="+str(count)) + else: + FileHdManager.subprocessCall("/bin/dd if=/dev/zero of="+str(vm_path)+" bs="+str(OXA_FILEHD_DD_BS_KB)+"k count="+str(count)) + + #Create Swap and mkswap + FileHdManager.logger.info("Creating swap disk...") + swapCount=int((OXA_DEFAULT_SWAP_SIZE_MB*1024)/OXA_FILEHD_DD_BS_KB) + FileHdManager.subprocessCall("/bin/dd if=/dev/zero of="+str(swap_path)+" bs="+str(OXA_FILEHD_DD_BS_KB)+"k count="+str(swapCount)) + FileHdManager.logger.info("Creating swap filesystem...") + FileHdManager.subprocessCall("/sbin/mkswap "+str(swap_path)) + + #Format + FileHdManager.logger.info("Creating EXT3 fs...") + FileHdManager.subprocessCall("/sbin/mkfs.ext3 -F -q "+str(vm_path)) + + #Untar disk contents + FileHdManager.logger.info("Uncompressing disk contents...") + path = FileHdManager.mount(vm) #mount + with open(os.devnull, 'w') as opendev: + FileHdManager.subprocessCall("/bin/tar -xvf "+str(template_path)+" -C "+str(path),stdout=opendev) + + except Exception as e: + FileHdManager.logger.error("Could not clone image to working directory: "+str(e)) + raise Exception("Could not clone image to working directory"+FileHdManager.debugVM(vm)) + finally: + try: + FileHdManager.umount(path) + except: + pass + + else: + raise Exception("Could not find origin hard-disk to clone"+FileHdManager.debugVM(vm)) + + @staticmethod + def delete(vm): + if not FileHdManager.isVMinRemoteFS(vm): + FileHdManager.moveVMToRemoteFS(vm) + os.remove(FileHdManager.getRemoteHdPath(vm)) + os.remove(FileHdManager.getRemoteSwapPath(vm)) + os.remove(FileHdManager.getRemoteConfigFilePath(vm)) + + #Mount/umount routines + @staticmethod + def mount(vm): + path = FileHdManager.getTmpMountedHdPath(vm) + + if not os.path.isdir(path): + os.makedirs(path) + + vm_path=FileHdManager.getHdPath(vm) + FileHdManager.subprocessCall('/bin/mount -o loop '+str(vm_path)+" "+str(path)) + + return path + + @staticmethod + def umount(path): + FileHdManager.subprocessCall('/bin/umount -d '+str(path)) + #remove dir + os.removedirs(path) + + + #Cache-Remote warehouse methods + @staticmethod + def isVMinRemoteFS(vm): + return 
os.path.exists(FileHdManager.getRemoteHdPath(vm)) + + @staticmethod + def isVMinCacheFS(vm): + return os.path.exists(FileHdManager.getHdPath(vm)) + + @staticmethod + def moveVMToRemoteFS(vm): + + #if Cache is not used skip + if not FileHdManager.__useCache: + return + + if FileHdManager.isVMinCacheFS(vm): + #create dirs if do not exist already + try: + os.makedirs(FileHdManager.getRemoteHdDirectory(vm)) + except Exception as e: + pass + #Move all files + shutil.move(FileHdManager.getHdPath(vm),FileHdManager.getRemoteHdPath(vm)) + shutil.move(FileHdManager.getSwapPath(vm),FileHdManager.getRemoteSwapPath(vm)) + shutil.move(FileHdManager.getConfigFilePath(vm),FileHdManager.getRemoteConfigFilePath(vm)) + else: + raise Exception("Cannot find VM in CACHE FS"+FileHdManager.debugVM(vm) ) + + @staticmethod + def moveVMToCacheFS(vm): + #if Cache is not used skip + if not FileHdManager.__useCache: + return + + if FileHdManager.isVMinRemoteFS(vm): + + if FileHdManager.isVMinCacheFS(vm): + raise Exception("Machine is already in Cache FS"+FileHdManager.debugVM(vm)) + + #create dirs if do not exist already + try: + os.makedirs(FileHdManager.getHdDirectory(vm)) + except Exception as e: + pass + + #Move all files + shutil.move(FileHdManager.getRemoteHdPath(vm),FileHdManager.getHdPath(vm)) + shutil.move(FileHdManager.getRemoteSwapPath(vm),FileHdManager.getSwapPath(vm)) + shutil.move(FileHdManager.getRemoteConfigFilePath(vm),FileHdManager.getConfigFilePath(vm)) + + else: + raise Exception("Cannot find VM in REMOTE FS"+FileHdManager.debugVM(vm)) + + + +import requests + +from httpretty import HTTPretty + +from social.p3 import urlparse +from social.utils import parse_qs, url_add_parameters + +from social.tests.models import User +from social.tests.backends.base import BaseBackendTest + + +class BaseOAuthTest(BaseBackendTest): + backend = None + backend_path = None + user_data_body = None + user_data_url = '' + user_data_content_type = 'application/json' + access_token_body = None + access_token_status = 200 + expected_username = '' + + def extra_settings(self): + return {'SOCIAL_AUTH_' + self.name + '_KEY': 'a-key', + 'SOCIAL_AUTH_' + self.name + '_SECRET': 'a-secret-key'} + + def _method(self, method): + return {'GET': HTTPretty.GET, + 'POST': HTTPretty.POST}[method] + + def handle_state(self, start_url, target_url): + start_query = parse_qs(urlparse(start_url).query) + redirect_uri = start_query.get('redirect_uri') + + if getattr(self.backend, 'STATE_PARAMETER', False): + if start_query.get('state'): + target_url = url_add_parameters(target_url, { + 'state': start_query['state'] + }) + + if redirect_uri and getattr(self.backend, 'REDIRECT_STATE', False): + redirect_query = parse_qs(urlparse(redirect_uri).query) + if redirect_query.get('redirect_state'): + target_url = url_add_parameters(target_url, { + 'redirect_state': redirect_query['redirect_state'] + }) + return target_url + + def auth_handlers(self, start_url): + target_url = self.handle_state(start_url, + self.strategy.build_absolute_uri( + self.complete_url + )) + HTTPretty.register_uri(HTTPretty.GET, + start_url, + status=301, + location=target_url) + HTTPretty.register_uri(HTTPretty.GET, + target_url, + status=200, + body='foobar') + HTTPretty.register_uri(self._method(self.backend.ACCESS_TOKEN_METHOD), + uri=self.backend.access_token_url(), + status=self.access_token_status, + body=self.access_token_body or '', + content_type='text/json') + if self.user_data_url: + HTTPretty.register_uri(HTTPretty.GET, + self.user_data_url, + 
body=self.user_data_body or '', + content_type=self.user_data_content_type) + return target_url + + def do_start(self): + start_url = self.backend.start().url + target_url = self.auth_handlers(start_url) + response = requests.get(start_url) + self.assertEqual(response.url, target_url) + self.assertEqual(response.text, 'foobar') + self.strategy.set_request_data(parse_qs(urlparse(target_url).query), + self.backend) + return self.backend.complete() + + +class OAuth1Test(BaseOAuthTest): + request_token_body = None + raw_complete_url = '/complete/{0}/?oauth_verifier=bazqux&' \ + 'oauth_token=foobar' + + def request_token_handler(self): + HTTPretty.register_uri(self._method(self.backend.REQUEST_TOKEN_METHOD), + self.backend.REQUEST_TOKEN_URL, + body=self.request_token_body, + status=200) + + def do_start(self): + self.request_token_handler() + return super(OAuth1Test, self).do_start() + + +class OAuth2Test(BaseOAuthTest): + raw_complete_url = '/complete/{0}/?code=foobar' + refresh_token_body = '' + + def refresh_token_arguments(self): + return {} + + def do_refresh_token(self): + self.do_login() + HTTPretty.register_uri(self._method(self.backend.REFRESH_TOKEN_METHOD), + self.backend.refresh_token_url(), + status=200, + body=self.refresh_token_body) + user = list(User.cache.values())[0] + social = user.social[0] + social.refresh_token(strategy=self.strategy, + **self.refresh_token_arguments()) + return user, social + +import sys +from sklearn.externals.six.moves import cStringIO as StringIO +import numpy as np +import scipy.sparse as sp +from sklearn.utils.testing import assert_equal +from sklearn.utils.testing import assert_almost_equal +from sklearn.utils.testing import assert_less +from sklearn.utils.testing import assert_raises_regexp +from sklearn.utils import check_random_state +from sklearn.manifold.t_sne import _joint_probabilities +from sklearn.manifold.t_sne import _kl_divergence +from sklearn.manifold.t_sne import _gradient_descent +from sklearn.manifold.t_sne import trustworthiness +from sklearn.manifold.t_sne import TSNE +from sklearn.manifold._utils import _binary_search_perplexity +from scipy.optimize import check_grad +from scipy.spatial.distance import pdist +from scipy.spatial.distance import squareform + + +def test_gradient_descent_stops(): + # Test stopping conditions of gradient descent. 
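+    # The two helpers below exercise the four ways _gradient_descent can stop:
+    # (1) the gradient norm falling below min_grad_norm, (2) the error decrease
+    # falling below min_error_diff, (3) n_iter_without_progress iterations with
+    # no improvement, and (4) simply reaching n_iter.  ObjectiveSmallGradient
+    # returns a slowly shrinking error with a tiny gradient; flat_function
+    # never improves at all.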
+ class ObjectiveSmallGradient: + def __init__(self): + self.it = -1 + + def __call__(self, _): + self.it += 1 + return (10 - self.it) / 10.0, np.array([1e-5]) + + def flat_function(_): + return 0.0, np.ones(1) + + # Gradient norm + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100, + n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, + min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert_equal(error, 1.0) + assert_equal(it, 0) + assert("gradient norm" in out) + + # Error difference + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100, + n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, + min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert_equal(error, 0.9) + assert_equal(it, 1) + assert("error difference" in out) + + # Maximum number of iterations without improvement + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + flat_function, np.zeros(1), 0, n_iter=100, + n_iter_without_progress=10, momentum=0.0, learning_rate=0.0, + min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert_equal(error, 0.0) + assert_equal(it, 11) + assert("did not make any progress" in out) + + # Maximum number of iterations + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11, + n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, + min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert_equal(error, 0.0) + assert_equal(it, 10) + assert("Iteration 10" in out) + + +def test_binary_search(): + # Test if the binary search finds Gaussians with desired perplexity. + random_state = check_random_state(0) + distances = random_state.randn(50, 2) + distances = distances.dot(distances.T) + np.fill_diagonal(distances, 0.0) + desired_perplexity = 25.0 + P = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + P = np.maximum(P, np.finfo(np.double).eps) + mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i]))) + for i in range(P.shape[0])]) + assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3) + + +def test_gradient(): + # Test gradient of Kullback-Leibler divergence. + random_state = check_random_state(0) + + n_samples = 50 + n_features = 2 + n_components = 2 + alpha = 1.0 + + distances = random_state.randn(n_samples, n_features) + distances = distances.dot(distances.T) + np.fill_diagonal(distances, 0.0) + X_embedded = random_state.randn(n_samples, n_components) + + P = _joint_probabilities(distances, desired_perplexity=25.0, + verbose=0) + fun = lambda params: _kl_divergence(params, P, alpha, n_samples, + n_components)[0] + grad = lambda params: _kl_divergence(params, P, alpha, n_samples, + n_components)[1] + assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, + decimal=5) + + +def test_trustworthiness(): + # Test trustworthiness score. 
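+    # Roughly, trustworthiness(X, X_embedded, n_neighbors=k) is 1.0 when every
+    # embedded point's k nearest neighbours were already among its neighbours
+    # in the original space, and decreases as unrelated points intrude.  The
+    # three cases below: an affine transform preserves neighbourhoods exactly,
+    # a random shuffle largely destroys them, and a small hand-built scramble
+    # gives a known low score.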
+ random_state = check_random_state(0) + + # Affine transformation + X = random_state.randn(100, 2) + assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0) + + # Randomly shuffled + X = np.arange(100).reshape(-1, 1) + X_embedded = X.copy() + random_state.shuffle(X_embedded) + assert_less(trustworthiness(X, X_embedded), 0.6) + + # Completely different + X = np.arange(5).reshape(-1, 1) + X_embedded = np.array([[0], [2], [4], [1], [3]]) + assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2) + + +def test_preserve_trustworthiness_approximately(): + # Nearest neighbors should be preserved approximately. + random_state = check_random_state(0) + X = random_state.randn(100, 2) + for init in ('random', 'pca'): + tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, + init=init, random_state=0) + X_embedded = tsne.fit_transform(X) + assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0, + decimal=1) + + +def test_fit_csr_matrix(): + # X can be a sparse matrix. + random_state = check_random_state(0) + X = random_state.randn(100, 2) + X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0 + X_csr = sp.csr_matrix(X) + tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, + random_state=0) + X_embedded = tsne.fit_transform(X_csr) + assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, + decimal=1) + + +def test_preserve_trustworthiness_approximately_with_precomputed_distances(): + # Nearest neighbors should be preserved approximately. + random_state = check_random_state(0) + X = random_state.randn(100, 2) + D = squareform(pdist(X), "sqeuclidean") + tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, + metric="precomputed", random_state=0) + X_embedded = tsne.fit_transform(D) + assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1, + precomputed=True), 1.0, decimal=1) + + +def test_early_exaggeration_too_small(): + # Early exaggeration factor must be >= 1. + tsne = TSNE(early_exaggeration=0.99) + assert_raises_regexp(ValueError, "early_exaggeration .*", + tsne.fit_transform, np.array([[0.0]])) + + +def test_too_few_iterations(): + # Number of gradient descent iterations must be at least 200. + tsne = TSNE(n_iter=199) + assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform, + np.array([[0.0]])) + + +def test_non_square_precomputed_distances(): + # Precomputed distance matrices must be square matrices. + tsne = TSNE(metric="precomputed") + assert_raises_regexp(ValueError, ".* square distance matrix", + tsne.fit_transform, np.array([[0.0], [1.0]])) + + +def test_init_not_available(): + # 'init' must be 'pca' or 'random'. + assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'", + TSNE, init="not available") + + +def test_distance_not_available(): + # 'metric' must be valid. + tsne = TSNE(metric="not available") + assert_raises_regexp(ValueError, "Unknown metric not available.*", + tsne.fit_transform, np.array([[0.0], [1.0]])) + + +def test_pca_initialization_not_compatible_with_precomputed_kernel(): + # Precomputed distance matrices must be square matrices. 
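+    # (More precisely, this test checks that init="pca" is rejected together
+    # with metric="precomputed"; squareness itself is covered by
+    # test_non_square_precomputed_distances above.)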
+    tsne = TSNE(metric="precomputed", init="pca")
+    assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
+                         "used with metric=\"precomputed\".",
+                         tsne.fit_transform, np.array([[0.0], [1.0]]))
+
+
+def test_verbose():
+    random_state = check_random_state(0)
+    tsne = TSNE(verbose=2)
+    X = random_state.randn(5, 2)
+
+    old_stdout = sys.stdout
+    sys.stdout = StringIO()
+    try:
+        tsne.fit_transform(X)
+    finally:
+        out = sys.stdout.getvalue()
+        sys.stdout.close()
+        sys.stdout = old_stdout
+
+    assert("[t-SNE]" in out)
+    assert("Computing pairwise distances" in out)
+    assert("Computed conditional probabilities" in out)
+    assert("Mean sigma" in out)
+    assert("Finished" in out)
+    assert("early exaggeration" in out)
+    assert("Finished" in out)
+
+
+def test_chebyshev_metric():
+    # t-SNE should allow metrics that cannot be squared (issue #3526).
+    random_state = check_random_state(0)
+    tsne = TSNE(metric="chebyshev")
+    X = random_state.randn(5, 2)
+    tsne.fit_transform(X)
+
+
+def test_reduction_to_one_component():
+    # t-SNE should allow reduction to one component (issue #4154).
+    random_state = check_random_state(0)
+    tsne = TSNE(n_components=1)
+    X = random_state.randn(5, 2)
+    X_embedded = tsne.fit(X).embedding_
+    assert(np.all(np.isfinite(X_embedded)))
+
+# -*- coding: utf-8 -*-
+'''
+Mepinta
+Copyright (c) 2011-2012, Joaquin G. Duo
+
+This file is part of Mepinta.
+
+Mepinta is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Mepinta is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
+''' + +from getDefaultContext import getDefaultContext +from pipeline_backend.logging.logging import LOG_INFO +from plugins_tests.base.K3dMeshPluginTest import K3dMeshPluginTest + + +class SelectFaceByNumber(K3dMeshPluginTest): + + def __post_init__(self): + import plugins.c_and_cpp.processors.k3dv1.mesh.selection.bynumber.SelectFaceByNumber as select + self.testedProcessors.append(select) + + def _createInputMesh(self, test_pline): + import plugins.c_and_cpp.processors.k3dv1.mesh.input.file.OBJMeshReader as obj_rdr + obj_node = test_pline.append(obj_rdr) + test_pline.setValue(obj_node.inputs.file, '/home/jduo/output.obj') + test_pline.defaultMarked.append(obj_node.functions.loadMesh) + return obj_node + + def definePluginPipeline(self, test_pline): + select = self.testedProcessors[0] + n_sel = test_pline.append(select) + import plugins.c_and_cpp.processors.k3dv1.mesh.modifiers.polyhedron.ExtrudeFaces as ext_fac + n_ext = test_pline.append(ext_fac) + + test_pline.setValue(n_sel.inputs.primitive_number, 0) + test_pline.setValue(n_sel.inputs.face_index, 0) + test_pline.setValue(n_ext.inputs.segments, 2) + test_pline.setValue(n_ext.inputs.distance, 4.0) + + def getTimeParameters(self): + return self.time.startEndStepSleep(end=-15., step=2, sleep=0.1) + + def stressPipeline(self, test_pline, time): + nodes = test_pline.getNodesDict() + node_sel = nodes['SelectFaceByNumber 1'] + test_pline.setValue(node_sel.inputs.face_index, time *2) + node_ext = nodes['ExtrudeFaces 1'] + test_pline.setValue(node_ext.inputs.distance, 1.0 + time/4.0) + test_pline.setValue(node_ext.inputs.segments, 1.0 + time/4.0) + + +test = SelectFaceByNumber + +if __name__ == "__main__": + sfbn = SelectFaceByNumber(getDefaultContext(LOG_INFO)) + +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module tests some of the methods related to the ``ECSV`` +reader/writer. + +Requires `pyyaml `_ to be installed. +""" +import os +import copy +import sys + +import numpy as np + +from ....table import Table, Column +from ....table.table_helpers import simple_table + +from ....tests.helper import pytest +from ....extern.six.moves import StringIO +from ..ecsv import DELIMITERS +from ... import ascii +from .... import units as u + +try: + import yaml # pylint: disable=W0611 + HAS_YAML = True +except ImportError: + HAS_YAML = False + +DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', + 'uint64', 'float16', 'float32', 'float64', 'float128', + 'str'] +if os.name == 'nt' or sys.maxsize <= 2**32: + DTYPES.remove('float128') + +T_DTYPES = Table() + +for dtype in DTYPES: + if dtype == 'bool': + data = np.array([False, True, False]) + elif dtype == 'str': + data = np.array(['ab 0', 'ab, 1', 'ab2']) + else: + data = np.arange(3, dtype=dtype) + c = Column(data, unit='m / s', description='descr_' + dtype, + meta={'meta ' + dtype: 1}) + T_DTYPES[dtype] = c + +T_DTYPES.meta['comments'] = ['comment1', 'comment2'] + +# Corresponds to simple_table() +SIMPLE_LINES = ['# %ECSV 0.9', + '# ---', + '# datatype:', + '# - {name: a, datatype: int64}', + '# - {name: b, datatype: float64}', + '# - {name: c, datatype: string}', + 'a b c', + '1 1.0 c', + '2 2.0 d', + '3 3.0 e'] + + +@pytest.mark.skipif('not HAS_YAML') +def test_write_simple(): + """ + Write a simple table with common types. This shows the compact version + of serialization with one line per column. 
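+    The written output is compared line by line against SIMPLE_LINES above.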
+ """ + t = simple_table() + + out = StringIO() + t.write(out, format='ascii.ecsv') + assert out.getvalue().splitlines() == SIMPLE_LINES + +@pytest.mark.skipif('not HAS_YAML') +def test_write_full(): + """ + Write a full-featured table with common types and explicitly checkout output + """ + t = T_DTYPES['bool', 'int64', 'float64', 'str'] + lines = ['# %ECSV 0.9', + '# ---', + '# datatype:', + '# - name: bool', + '# unit: m / s', + '# datatype: bool', + '# description: descr_bool', + '# meta: {meta bool: 1}', + '# - name: int64', + '# unit: m / s', + '# datatype: int64', + '# description: descr_int64', + '# meta: {meta int64: 1}', + '# - name: float64', + '# unit: m / s', + '# datatype: float64', + '# description: descr_float64', + '# meta: {meta float64: 1}', + '# - name: str', + '# unit: m / s', + '# datatype: string', + '# description: descr_str', + '# meta: {meta str: 1}', + '# meta: !!omap', + '# - comments: [comment1, comment2]', + 'bool int64 float64 str', + 'False 0 0.0 "ab 0"', + 'True 1 1.0 "ab, 1"', + 'False 2 2.0 ab2'] + + out = StringIO() + t.write(out, format='ascii.ecsv') + assert out.getvalue().splitlines() == lines + +@pytest.mark.skipif('not HAS_YAML') +def test_write_read_roundtrip(): + """ + Write a full-featured table with all types and see that it round-trips on + readback. Use both space and comma delimiters. + """ + t = T_DTYPES + for delimiter in DELIMITERS: + out = StringIO() + t.write(out, format='ascii.ecsv', delimiter=delimiter) + + t2s = [Table.read(out.getvalue(), format='ascii.ecsv'), + Table.read(out.getvalue(), format='ascii'), + ascii.read(out.getvalue()), + ascii.read(out.getvalue(), format='ecsv', guess=False), + ascii.read(out.getvalue(), format='ecsv')] + for t2 in t2s: + assert t.meta == t2.meta + for name in t.colnames: + assert t[name].attrs_equal(t2[name]) + assert np.all(t[name] == t2[name]) + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_delimiter(): + """ + Passing a delimiter other than space or comma gives an exception + """ + out = StringIO() + with pytest.raises(ValueError) as err: + T_DTYPES.write(out, format='ascii.ecsv', delimiter='|') + assert 'only space and comma are allowed' in str(err.value) + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_header_start(): + """ + Bad header without initial # %ECSV x.x + """ + lines = copy.copy(SIMPLE_LINES) + lines[0] = '# %ECV 0.9' + with pytest.raises(ascii.InconsistentTableError): + Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_delimiter_input(): + """ + Illegal delimiter in input + """ + lines = copy.copy(SIMPLE_LINES) + lines.insert(2, '# delimiter: |') + with pytest.raises(ValueError) as err: + Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) + assert 'only space and comma are allowed' in str(err.value) + +@pytest.mark.skipif('not HAS_YAML') +def test_multidim_input(): + """ + Multi-dimensional column in input + """ + t = Table([np.arange(4).reshape(2, 2)], names=['a']) + out = StringIO() + with pytest.raises(ValueError) as err: + t.write(out, format='ascii.ecsv') + assert 'ECSV format does not support multidimensional column' in str(err.value) + +@pytest.mark.skipif('not HAS_YAML') +def test_round_trip_empty_table(): + """Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)""" + t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c']) + out = StringIO() + t.write(out, format='ascii.ecsv') + t2 = Table.read(out.getvalue(), format='ascii.ecsv') + assert t.dtype == t2.dtype + 
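+    # The round-tripped empty table keeps its column dtypes even though it has
+    # no data rows.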
assert len(t2) == 0 + + +@pytest.mark.skipif('not HAS_YAML') +def test_csv_ecsv_colnames_mismatch(): + """ + Test that mismatch in column names from normal CSV header vs. + ECSV YAML header raises the expected exception. + """ + lines = copy.copy(SIMPLE_LINES) + header_index = lines.index('a b c') + lines[header_index] = 'a b d' + with pytest.raises(ValueError) as err: + ascii.read(lines, format='ecsv') + assert "column names from ECSV header ['a', 'b', 'c']" in str(err) + + +@pytest.mark.skipif('not HAS_YAML') +def test_regression_5604(): + """ + See https://github.com/astropy/astropy/issues/5604 for more. + """ + t = Table() + t.meta = {"foo": 5*u.km, "foo2": u.s} + t["bar"] = [7]*u.km + + out = StringIO() + t.write(out, format="ascii.ecsv") + + assert '!astropy.units.Unit' in out.getvalue() + assert '!astropy.units.Quantity' in out.getvalue() + +# Core imports +import Queue +import thread +import time + +# Library imports +from mock import patch, MagicMock, PropertyMock + +# Package imports +from ..core.Thread_Manager import Thread_Manager +from ..core.Threadable import Threadable +from ..settings.Arguments import Arguments +from ..reconstruction.Buffer import Buffer +from ..zigbee.Packet import Packet +from ..zigbee.RF_Sensor import RF_Sensor, DisabledException +from ..zigbee.TDMA_Scheduler import TDMA_Scheduler +from settings import SettingsTestCase +from zigbee_packet import ZigBeePacketTestCase + +class ZigBeeRFSensorTestCase(SettingsTestCase, ZigBeePacketTestCase): + """ + Test case base class that provides the necessities to create one of the + `RF_Sensor` types of objects. + """ + + def setUp(self): + super(ZigBeeRFSensorTestCase, self).setUp() + + self.arguments = Arguments("settings.json", ["--rf-sensor-id", "1"]) + + self.thread_manager = Thread_Manager() + self.location_callback = MagicMock(return_value=((0, 0), 0)) + self.receive_callback = MagicMock() + self.valid_callback = MagicMock(return_value=(True, True)) + + def _create_sensor(self, sensor_type, **kwargs): + """ + Create the RF sensor object. The `sensor_type` is a class that is either + `RF_Sensor` or a subclass thereof. Additional keyword arguments are + passed through to the object initialization. + + The resulting `RF_Sensor` object is returned. + """ + + return sensor_type(self.arguments, self.thread_manager, + self.location_callback, self.receive_callback, + self.valid_callback, **kwargs) + +class TestZigBeeRFSensor(ZigBeeRFSensorTestCase): + def setUp(self): + super(TestZigBeeRFSensor, self).setUp() + + self.settings = self.arguments.get_settings("zigbee_base") + + type_mock = PropertyMock(return_value="zigbee_base") + with patch.object(RF_Sensor, "type", new_callable=type_mock): + self.rf_sensor = self._create_sensor(RF_Sensor) + + def test_initialization(self): + # Providing an uncallable callback raises an exception. + with self.assertRaises(TypeError): + RF_Sensor(self.arguments, self.thread_manager, None, None, None) + + # Not providing an `Arguments` object raises an exception. + with self.assertRaises(ValueError): + RF_Sensor(None, self.thread_manager, self.location_callback, + self.receive_callback, self.valid_callback) + + # The settings must be loaded when an `Arguments` object is provided. + self.assertEqual(self.rf_sensor._settings, self.settings) + + # Common member variables must be initialized. 
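+        # (The expected values come from the "zigbee_base" settings loaded in
+        # setUp via the Arguments object.)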
+ self.assertEqual(self.rf_sensor._id, self.settings.get("rf_sensor_id")) + self.assertEqual(self.rf_sensor._number_of_sensors, + self.settings.get("number_of_sensors")) + self.assertEqual(self.rf_sensor._address, None) + self.assertEqual(self.rf_sensor._connection, None) + self.assertEqual(self.rf_sensor._buffer, None) + self.assertIsInstance(self.rf_sensor._scheduler, TDMA_Scheduler) + self.assertIsInstance(self.rf_sensor._packets, Queue.Queue) + self.assertEqual(self.rf_sensor._packets.qsize(), 0) + self.assertIsInstance(self.rf_sensor._custom_packets, Queue.Queue) + self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0) + + self.assertEqual(self.rf_sensor._joined, False) + self.assertEqual(self.rf_sensor._activated, False) + self.assertEqual(self.rf_sensor._started, False) + + self.assertEqual(self.rf_sensor._loop_delay, self.settings.get("loop_delay")) + + self.assertTrue(hasattr(self.rf_sensor._location_callback, "__call__")) + self.assertTrue(hasattr(self.rf_sensor._receive_callback, "__call__")) + self.assertTrue(hasattr(self.rf_sensor._valid_callback, "__call__")) + + def test_id(self): + # The RF sensor ID must be returned. + self.assertEqual(self.rf_sensor.id, self.rf_sensor._id) + + def test_number_of_sensors(self): + # The number of sensors must be returned. + self.assertEqual(self.rf_sensor.number_of_sensors, + self.rf_sensor._number_of_sensors) + + def test_buffer(self): + # Providing an invalid buffer raises an exception. + with self.assertRaises(ValueError): + self.rf_sensor.buffer = [] + + # A valid buffer must be set and returned. + buffer = Buffer(self.settings) + self.rf_sensor.buffer = buffer + self.assertEqual(self.rf_sensor.buffer, buffer) + + def test_type(self): + # Verify that the interface requires subclasses to implement + # the `type` property. + with self.assertRaises(NotImplementedError): + dummy = self.rf_sensor.type + + def test_identity(self): + # The identity must include the ID, address and network join status. + self.assertEqual(self.rf_sensor.identity, { + "id": self.rf_sensor._id, + "address": self.rf_sensor._address, + "joined": self.rf_sensor._joined + }) + + def test_activate(self): + with patch.object(RF_Sensor, "_setup") as setup_mock: + with patch.object(thread, "start_new_thread") as start_new_thread_mock: + self.rf_sensor.activate() + + # The sensor must be setup and the loop thread must be started. + self.assertTrue(self.rf_sensor._activated) + self.assertEqual(setup_mock.call_count, 1) + self.assertEqual(start_new_thread_mock.call_count, 1) + + def test_deactivate(self): + connection_mock = MagicMock() + + with patch.object(RF_Sensor, "_setup"): + with patch.object(thread, "start_new_thread"): + self.rf_sensor.activate() + self.rf_sensor._connection = connection_mock + self.rf_sensor.deactivate() + + # The connection must be closed and the sensor must be deactivated. + self.assertEqual(self.rf_sensor._activated, False) + self.assertEqual(connection_mock.close.call_count, 1) + self.assertEqual(self.rf_sensor._connection, None) + + def test_start(self): + # The sensor must be started for sending RSSI broadcast/ground + # station packets. Make sure that the schedule will try to shift again + # when the measurements start. + self.rf_sensor.start() + self.assertTrue(self.rf_sensor._started) + self.assertEqual(self.rf_sensor._packets.qsize(), 0) + self.assertNotEqual(self.rf_sensor._scheduler.timestamp, 0.0) + + def test_stop(self): + # Pretend that we start the RF sensor so that we know that `stop` + # functions. 
+ self.rf_sensor.start() + + # The sensor must be stopped for sending custom packets. Make sure that + # the scheduler timestamp is reset, so that it updates correctly in + # case we restart the sensor measurements. + self.rf_sensor.stop() + self.assertEqual(self.rf_sensor._started, False) + self.assertEqual(self.rf_sensor._scheduler.timestamp, 0.0) + + def test_enqueue(self): + # Providing a packet that is not a `Packet` object raises an exception. + with self.assertRaises(TypeError): + self.rf_sensor.enqueue({ + "foo": "bar" + }) + + # Providing a private packet raises an exception. + with self.assertRaises(ValueError): + self.packet.set("specification", "rssi_broadcast") + self.rf_sensor.enqueue(self.packet) + + # Packets that do not have a destination must be broadcasted. + # We subtract one because we do not send to ourself. + self.packet.set("specification", "waypoint_clear") + self.packet.set("to_id", 2) + self.rf_sensor.enqueue(self.packet) + + self.assertEqual(self.rf_sensor._custom_packets.qsize(), + self.rf_sensor.number_of_sensors - 1) + for to_id in xrange(1, self.rf_sensor.number_of_sensors + 1): + if to_id == self.rf_sensor.id: + continue + + item = self.rf_sensor._custom_packets.get() + self.assertIsInstance(item["packet"], Packet) + self.assertEqual(item["packet"].get_all(), { + "specification": "waypoint_clear", + "to_id": 2 + }) + self.assertEqual(item["to"], to_id) + + self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0) + + # Packets that do contain a destination must be enqueued directly. + self.rf_sensor.enqueue(self.packet, to=2) + + self.assertEqual(self.rf_sensor._custom_packets.qsize(), 1) + self.assertEqual(self.rf_sensor._custom_packets.get(), { + "packet": self.packet, + "to": 2 + }) + self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0) + + def test_discover(self): + # Providing an invalid callback raises an exception. + with self.assertRaisesRegexp(TypeError, "callback is not callable"): + self.rf_sensor.discover(None) + + callback_mock = MagicMock() + + # Providing invalid required sensors raises an exception. + with self.assertRaisesRegexp(TypeError, "must be a `set`"): + self.rf_sensor.discover(callback_mock, required_sensors=2) + + # Providing an set of required sensors that cannot be discovered raises + # an exception. + with self.assertRaisesRegexp(ValueError, "only contain vehicle sensors"): + self.rf_sensor.discover(callback_mock, required_sensors=set([0])) + + def test_setup(self): + # Verify that the interface requires subclasses to implement + # the `_setup` method. + with self.assertRaises(NotImplementedError): + self.rf_sensor._setup() + + def test_loop(self): + self.rf_sensor._activated = True + + with patch.object(Threadable, "interrupt") as interrupt_mock: + with patch.object(RF_Sensor, "_loop_body") as loop_body_mock: + # The loop body and interrupt handler must be called when + # an exception other than a `DisabledException` is raised. + loop_body_mock.configure_mock(side_effect=RuntimeError) + self.rf_sensor._loop() + loop_body_mock.assert_called_once_with() + interrupt_mock.assert_called_once_with() + + with patch.object(Threadable, "interrupt") as interrupt_mock: + with patch.object(RF_Sensor, "_loop_body") as loop_body_mock: + # The loop body must be called when a `DisabledException` is + # raised, but nothing else must happen. 
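+                # DisabledException is expected to end the loop quietly: the
+                # loop body runs once and the thread manager's interrupt
+                # handler is never invoked (see the assertions below).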
+ loop_body_mock.configure_mock(side_effect=DisabledException) + self.rf_sensor._loop() + loop_body_mock.assert_called_once_with() + interrupt_mock.assert_not_called() + + def test_loop_body(self): + with patch.object(RF_Sensor, "_send_custom_packets") as send_custom_packets_mock: + # Send custom packets when the sensor has been activated, + # but not started. + self.rf_sensor._loop_body() + send_custom_packets_mock.assert_called_once_with() + + # If the current time is inside an allocated slot, then packets + # may be sent. + in_slot_mock = PropertyMock(return_value=True) + with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock): + with patch.object(TDMA_Scheduler, "update") as update_mock: + with patch.object(RF_Sensor, "_send") as send_mock: + self.rf_sensor._started = True + + # Send RSSI broadcast/ground station packets when the sensor + # has been activated and started. + self.rf_sensor._loop_body() + + update_mock.assert_called_once_with() + send_mock.assert_called_once_with() + + # If the current time is not inside an allocated slot, then no + # packets may be sent. + in_slot_mock = PropertyMock(return_value=False) + with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock): + with patch.object(TDMA_Scheduler, "update") as update_mock: + with patch.object(RF_Sensor, "_send") as send_mock: + self.rf_sensor._started = True + + # Send RSSI broadcast/ground station packets when the sensor + # has been activated and started. + self.rf_sensor._loop_body() + + update_mock.assert_not_called() + send_mock.assert_not_called() + + @patch.object(RF_Sensor, "_send_tx_frame") + def test_send(self, send_tx_frame_mock): + self.rf_sensor._packets.put(self.rf_sensor._create_rssi_broadcast_packet(2)) +