code
stringlengths 10
805k
| def_use_chains
sequencelengths 0
667
|
---|---|
import RPi.GPIO as GPIO
import time
HIN = 8
LIN = 10
freq = 500
class Motor:
def __init__(self, HIN=HIN, LIN=LIN, freq=freq):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(HIN, GPIO.OUT)
GPIO.setup(LIN, GPIO.OUT)
self.high = GPIO.PWM(HIN, freq)
self.low = GPIO.PWM(LIN, freq)
self.low.start(0)
def setSpeed(self, speed):
if speed < 0:
self.high.start(0)
elif speed > 100:
self.high.start(100)
else:
self.high.start(speed)
| [
[
[
7,
23
],
[
140,
144
],
[
153,
157
],
[
173,
177
],
[
189,
193
],
[
207,
211
],
[
223,
227
],
[
262,
266
],
[
302,
306
]
],
[
[
31,
35
]
],
[
[
37,
40
],
[
106,
109
]
],
[
[
45,
48
],
[
115,
118
]
],
[
[
54,
58
],
[
125,
129
]
],
[
[
72,
77
]
]
] |
# !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Get hardware info from Bpod
"""
from pybpodapi.protocol import Bpod
from confapp import conf
my_bpod = Bpod()
my_bpod.close()
print("Target Bpod firmware version: ", conf.TARGET_BPOD_FIRMWARE_VERSION)
print("Firmware version (read from device): ", my_bpod.hardware.firmware_version)
print("Machine type version (read from device): ", my_bpod.hardware.machine_type)
| [
[
[
114,
118
],
[
156,
160
]
],
[
[
139,
143
],
[
221,
225
]
],
[
[
146,
153
],
[
164,
171
],
[
303,
310
],
[
389,
396
]
]
] |
import boto3
class DynamoDB(object):
def __init__(self, table_name):
self.resource = self._resource()
self.client = self._client()
self.table = self.resource.Table(table_name)
self.table_name = table_name
def _resource(self):
return boto3.resource('dynamodb')
def _client(self):
return boto3.client('dynamodb')
def put_item(self, item):
return self.table.put_item(Item=item)
def get_item(self, key, value):
return self.table.get_item(Key={key: value})
def get_scan_paginator(self, attributes, page_size=100):
paginator = self.client.get_paginator('scan')
for page in paginator.paginate(
TableName=self.table_name,
AttributesToGet=[attributes],
PaginationConfig={'PageSize': page_size}):
yield page
| [
[
[
7,
12
],
[
284,
289
],
[
350,
355
]
],
[
[
21,
29
]
]
] |
CCSettingsList = {'Simulink.SolverCC' : # Solver
{
'StartTime' : '0.0',
'StopTime' : 'inf',
'SolverMode' : 'SingleTasking',
'Solver' : 'FixedStepDiscrete',
'SolverName' : 'FixedStepDiscrete',
#Defined by Solver and SolverName 'SolverType' : 'Fixed-Step',
'AutoInsertRateTranBlk' : 'off'
},
'Simulink.DataIOCC' : # DataIO
{ 'SaveFormat' : 'StructureWithTime'},
'Simulink.OptimizationCC' : # Optimization
{
'BlockReduction' : 'off',
'BooleanDataType' : 'on',
'ConditionallyExecuteInputs' : 'off',
'UseSpecifiedMinMax' : 'off',
'ExpressionFolding' : 'off',
'RollThreshold' : 5,
'ZeroExternalMemoryAtStartup' : 'on',
'ZeroInternalMemoryAtStartup' : 'on',
'NoFixptDivByZeroProtection' : 'on',
'EfficientFloat2IntCast' : 'off',
'EfficientMapNaN2IntZero' : 'off',
'LifeSpan' : 'inf',
'InitFltsAndDblsToZero' : 'off'
},
'Simulink.DebuggingCC' : #Diag_Signal_Data
{
'RTPrefix' : 'error',
'ArrayBoundsChecking' : 'none',
'SignalInfNanChecking' : 'error',
'SignalRangeChecking' : 'error',
'CheckMatrixSingularityMsg' : 'error',
'IntegerOverflowMsg' : 'error',
'UnderSpecifiedDataTypeMsg' : 'error',
'UniqueDataStoreMsg' : 'error',
# 'Diag_Data_Stores' :
'ReadBeforeWriteMsg' : 'EnableAllAsError',
'WriteAfterWriteMsg' : 'EnableAllAsError',
'WriteAfterReadMsg' : 'EnableAllAsError',
'MultiTaskDSMMsg' : 'error',
# 'Diag_Solver' :
'AlgebraicLoopMsg' : 'error',
'ArtificialAlgebraicLoopMsg' : 'error',
'BlockPriorityViolationMsg' : 'error',
'SolverPrmCheckMsg' : 'error',
'UnknownTsInhSupMsg' : 'error',
'StateNameClashWarn' : 'warning',
# 'Diag_Saving' :
'SaveWithDisabledLinksMsg' : 'warning',
'SaveWithParameterizedLinksMsg' : 'warning',
# 'Diag_Init' :
'CheckSSInitialOutputMsg' : 'on',
'CheckExecutionContextPreStartOutputMsg' : 'on',
'CheckExecutionContextRuntimeOutputMsg' : 'on',
# 'Diag' :
'SignalResolutionControl' : 'UseLocalSettings',
# 'Diag_Sample_Time' :
'InheritedTsInSrcMsg' : 'warning',
'DiscreteInheritContinuousMsg' : 'error',
'MultiTaskCondExecSysMsg' : 'error',
'MultiTaskRateTransMsg' : 'error',
'SingleTaskRateTransMsg' : 'error',
'TasksWithSamePriorityMsg' : 'error',
'SigSpecEnsureSampleTimeMsg' : 'error',
# 'Diag_Data_Type' :
'Int32ToFloatConvMsg' : 'warning',
'UnnecessaryDatatypeConvMsg' : 'warning',
'VectorMatrixConversionMsg' : 'error',
# 'Diag_Parameter' :
'ParameterDowncastMsg' : 'error',
'ParameterOverflowMsg' : 'error',
'ParameterUnderflowMsg' : 'error',
'ParameterPrecisionLossMsg' : 'warning',
'ParameterTunabilityLossMsg' : 'error',
# 'Diag_Function_Call' :
'InvalidFcnCallConnMsg' : 'error',
'FcnCallInpInsideContextMsg' : 'Enable All',
# 'Diag_Sig_Connectivity' :
'SignalLabelMismatchMsg' : 'warning',
'UnconnectedInputMsg' : 'error',
'UnconnectedOutputMsg' : 'error',
'UnconnectedLineMsg' : 'error',
# 'Diag_Compatibility' :
'SFcnCompatibilityMsg' : 'error',
# 'Diag_Bus_Connectivity' :
'BusObjectLabelMismatch' : 'error',
'RootOutportRequireBusObject' : 'error',
'StrictBusMsg' : 'ErrorOnBusTreatedAsVector',
# 'Diag_Debug' :
'AssertControl' : 'DisableAll',
# 'Diag_Model_Referencing' :
'ModelReferenceIOMsg' : 'error',
'ModelReferenceVersionMismatchMessage' : 'none',
'ModelReferenceIOMismatchMessage' : 'error',
'ModelReferenceCSMismatchMessage' : 'warning',
'ModelReferenceDataLoggingMessage' : 'error'
},
'Simulink.HardwareCC' : #HW_Implementation
{
'ProdShiftRightIntArith' : 'on',
'ProdHWDeviceType' : 'Freescale->MPC82xx'
},
'Simulink.ModelReferenceCC' : #Model_Referencing
{
'UpdateModelReferenceTargets' : 'IfOutOfDate',
'ModelReferenceNumInstancesAllowed' : 'Single',
'ModelReferencePassRootInputsByReference' : 'on',
'ModelReferenceMinAlgLoopOccurrences' : 'off'
},
'Simulink.RTWCC' : # RTW
{
'IncludeHyperlinkInReport' : 'on',
'GenerateTraceInfo' : 'on',
'GenerateTraceReport' : 'on',
'GenerateTraceReportSl' : 'on',
'GenerateTraceReportSf' : 'on',
'GenerateTraceReportEml' : 'on',
'ObjectivePriorities' : ['Traceability','Safety precaution'],
'CheckMdlBeforeBuild' : 'Warning'
},
'Simulink.CodeAppCC' : # RTW_Code_Appearance
{
'ForceParamTrailComments' : 'on',
'GenerateComments' : 'on',
'MaxIdLength' : 31,
'ShowEliminatedStatement' : 'on',
'SimulinkDataObjDesc' : 'on',
'SFDataObjDesc' : 'on',
'MangleLength' : 4,
'CustomSymbolStrGlobalVar' : '$R$N$M',
'CustomSymbolStrType' : '$N$R$M',
'CustomSymbolStrField' : '$N$M',
'CustomSymbolStrFcn' : '$R$N$M$F',
'CustomSymbolStrFcnArg' : 'rt$I$N$M',
'CustomSymbolStrBlkIO' : 'rtb_$N$M',
'CustomSymbolStrTmpVar' : '$N$M',
'CustomSymbolStrMacro' : '$R$N$M_D',
'CustomCommentsFcn' : 'taxibot_comments_mptfun.m',
'DefineNamingRule' : 'None',
'ParamNamingRule' : 'None',
'SignalNamingRule' : 'None',
'InsertBlockDesc' : 'on',
'SimulinkBlockComments' : 'on',
'EnableCustomComments' : 'on',
'InlinedPrmAccess' : 'Literals',
'ReqsInCode' : 'on'
},
'Simulink.ERTTargetCC' : # RTW_ERT_Target
{
'TargetFunctionLibrary' : 'C89/90 (ANSI)',
'ERTMultiwordLength' : 256,
'GenerateSampleERTMain' : 'off',
'IncludeMdlTerminateFcn' : 'off',
'GeneratePreprocessorConditionals' : 'Enable all',
'CombineOutputUpdateFcns' : 'on',
'SuppressErrorStatus' : 'on',
'SupportAbsoluteTime' : 'off',
'MatFileLogging' : 'off',
'SupportNonFinite' : 'off',
'SupportComplex' : 'off',
'SupportContinuousTime' : 'off',
'SupportNonInlinedSFcns' : 'off',
'SupportVariableSizeSignals' : 'off',
'ParenthesesLevel' : 'Maximum',
'PortableWordSizes' : 'off',
'GenerateASAP2' : 'on',
'InlinedParameterPlacement' : 'Hierarchical',
'ERTSrcFileBannerTemplate' : 'taxibot_code_c_template.cgt',
'ERTHdrFileBannerTemplate' : 'taxibot_code_h_template.cgt',
'ERTDataSrcFileTemplate' : 'taxibot_data_c_template.cgt',
'ERTDataHdrFileTemplate' : 'taxibot_data_h_template.cgt',
'GRTInterface' : 'off',
'PreserveExpressionOrder' : 'on',
'PreserveIfCondition' : 'on',
'ConvertIfToSwitch' : 'off',
'EnableUserReplacementTypes' : 'on',
'UtilityFuncGeneration' : 'Shared location'
}}
DataStoreCC = { # Checks Rule HISL_0013 A
'HISL_0013 A': {'UniqueDataStoreMsg' : 'error',
'ReadBeforeWriteMsg' : 'EnableAllAsError',
'WriteAfterWriteMsg' : 'EnableAllAsError',
'WriteAfterReadMsg' : 'EnableAllAsError',
'MultiTaskDSMMsg' : 'error'},
# Checks Rule HISL_0005 C
'HISL_0005 C': {'CheckMatrixSingularityMsg' : 'error'}
}
AllowedOtherBlocks = {
'BusCreator' : [],
'BusSelector' : [],
'Concatenate' : [],
'Mux' : [],
'Demux' : [],
'From' : [],
'Goto' : [],
'GotoTagVisibility' : [],
'Merge' : [],
'Inport' : [],
'Outport' : [],
'Terminator' : [],
'Constant' : ['Value'],
'If' : [],
'SwitchCase' : [],
'RateTransition' : [],
'DataTypeConversion' : [],
'Lookup' : ['InputValues', 'Table'],
'Lookup2D' : ['RowIndex', 'ColumnIndex', 'Table'],
'Chart' : [],
'UnitDelay' : ['X0'],
'DiscreteIntegrator' : ['InitialCondition'],
'DiscreteTransferFcn' : ['Numerator', 'Denominator'],
'Sum' : [],
'Gain' : ['Gain'],
'Product' : [],
'Abs' : [],
'Math' : [],
'MinMax' : [],
'Trigonometry' : [],
'Sqrt' : [],
'Logic' : [],
'RelationalOperator' : [],
'Relay' : ['OnSwitchValue', 'OffSwitchValue', 'OnOutputValue', 'OffOutputValue'],
'Saturate' : ['UpperLimit', 'LowerLimit'],
'Switch' : ['Threshold'],
'ActionPort' : [],
'TriggerPort' : [],
'MultiPortSwitch' : [],
'Selector' : []
}
AllowedSubsystemBlocks = {
'ActionType': ['then', 'else', 'case', 'default','elseif'],
'TreatAsAtomicUnit': ['on'],
'RTWSystemCode': ['Auto', 'Reusable function', 'Function'],
'MaskType': ['CMBlock', 'Compare To Constant', 'DocBlock',
'Conversion', 'ReqId','Stateflow']
}
AllowedModelReferenceBlocks = {
'MaskType': ['Asymmetrical Debounce', 'Falling Edge', 'First order filter', 'Hysteresis',
'Latch', 'Periodic enable', 'Rate Limiter', 'Rising edge',
'Running average', 'SR Latch', 'Symmetrical Debounce']
}
AllowedReferenceBlocks = {
'SourceType' : ['Asymmetrical Debounce', 'CMBlock', 'Conversion',
'DocBlock', 'Falling Edge', 'Hysteresis',
'Latch', 'Lookup Table Dynamic', 'Rate Limiter',
'ReqId', 'Rising edge', 'Saturation Dynamic',
'SR Latch', 'SubSystem', 'Symmetrical Debounce'
'Function-Call Generator', 'Compare To Constant',
'First order filter','Periodic enable',
'Running average','Symmetrical Debounce'],
}
AttributesFormatString = {
'Lookup' : '<input=%<inputvalues>>\\\\n<output=%<outputvalues>>',
'UnitDelay' : '<initial=%<x0>>\\\\n<tsample=%<sampleTime>>',
'Switch' : '<threshold=%<threshold>>\\\\n<criteria=%<Criteria>>',
'DiscreteIntegrator' : '<initial=%<initialcondition>>\\\\n<tsample=%<sampleTime>>\\\\n<limits=%<UpperSaturationLimit>/%<LowerSaturationLimit>(%<LimitOutput>)>',
'DiscreteZeroPole' : '<tsample=%<sampleTime>>\\\\n<gain=%<gain>>',
'Outport' : '<tsample=%<SampleTime>>',
'Inport' : '<tsample=%<SampleTime>>',
'Lookup2D' : '<row=%<x>>\\\\n<column=%<y>>\\\\n<table=%<t>>',
'Saturate' : '<limits=%<upperlimit>\\%<lowerlimit>>',
'Backlash' : '<initial=%<initialoutput>,width=%<backlashwidth>>',
'DeadZone' : '<zone=%<lowervalue>/%<uppervalue>>',
'Relay' : '<low=(%<offswitchvalue>,%<offoutputvalue>)>\\\\n<high=(%<onswitchvalue>,%<onoutputvalue>)>',
'Merge' : '<initial=%<initialoutput>>',
'DiscreteTransferFcn' : '<tsample=%<sampleTime>>',
'Quantizer' : '<interval=%<quantizationinterval>>'
}
ReusableLibList = ['Asymmetrical Debounce', 'Falling Edge', 'First order filter',
'Hysteresis', 'Latch', 'Periodic enable', 'Rate Limiter', 'Rising edge',
'Running average', 'SR Latch', 'Symmetrical Debounce']
RuleDetails = {
'MISRA AC SLSF 002' : 'Data type conversion block used for signal data type conversion.',
'MISRA AC SLSF 003' : 'Fixed step discrete solver used for functional algorithm',
'MISRA AC SLSF 004' : 'Simulink diagnostic configuration.',
'MISRA AC SLSF 005 B' : 'Function and duplicate inport blocks must not be used',
'MISRA AC SLSF 005 C' : 'Data store memory usage must not be used to exchange data across subsystem.',
'MISRA AC SLSF 006 A' : 'Block parameters evaluation at runtime must not contain Expressions, Data type conversions and Selection of rows or columns.',
'MISRA AC SLSF 006 B' : 'Block parameters intended to be configured or calibrated must be entered as named constants.',
'MISRA AC SLSF 006 D' : 'named constants must be defined in an external file',
'MISRA AC SLSF 006 E' : 'Masked sub-systems must not be used to pass parameters',
'MISRA AC SLSF 007 A' : 'define explicitly the initialization value.',
'MISRA AC SLSF 008 A' : 'Saturation property should not be selected if configured to saturate on overflow',
'MISRA AC SLSF 008 B' : 'Configure rounding behaviour to zero',
'MISRA AC SLSF 009 B' : 'Block priority should be not used for block execution order',
'MISRA AC SLSF 009 C' : 'Execution order specified by function calls or data flows.',
'MISRA AC SLSF 009 D' : 'Sample time to be inherited.',
'MISRA AC SLSF 011 A' : 'Not more than one level of nested control flow.',
'MISRA AC SLSF 011 B' : 'Default case, a must in switch case',
'MISRA AC SLSF 012 A' : 'the control input must be a Boolean type.',
'MISRA AC SLSF 013 A' : 'at least two switched inputs',
'MISRA AC SLSF 013 C' : 'Control input must be greater than or equal to 1 and less than switched inputs.',
'MISRA AC SLSF 014 A' : 'S-functions must be only under certain conditions.',
'MISRA AC SLSF 015 A' : 'Vector signal:created either by feeding individual named scalar signals into a mux-block, or by using a vector constant, or by a Stateflow block.',
'MISRA AC SLSF 015 B' : 'Matrix signal:created either by feeding individual vector signals into a matrix concatenation block, or a matrix constant, or by a Stateflow block.',
'MISRA AC SLSF 015 C' : 'contain signals with common functionality, data type, dimensions and units.',
'MISRA AC SLSF 016 A' : 'created by using a bus creator block.',
'MISRA AC SLSF 016 B' : 'must be named.',
'MISRA AC SLSF 016 C' : 'must not contain unnamed signals.',
'MISRA AC SLSF 016 D' : 'must only be operated on by bus capable Simulink blocks.',
'MISRA AC SLSF 016 E' : 'be split up using a bus-selector block and not a demux-block only.',
'MISRA AC SLSF 017 A' : 'no unconnected blocks.',
'MISRA AC SLSF 017 B' : 'no unconnected signal lines or busses.',
'MISRA AC SLSF 018 A' : 'Global and scoped blocks must not be used.',
'MISRA AC SLSF 018 B' : 'Tag must match corresponding signal or bus label.',
'MISRA AC SLSF 018 C' : 'tags must be unique.',
'MISRA AC SLSF 018 D' : '"goto" block must have one or more matching "from" block.',
'MISRA AC SLSF 018 E' : ' "from" block must have exactly one matching "goto" block.',
'MISRA AC SLSF 027 A' : 'that require a label must be labelled directly at source.',
'MISRA AC SLSF 027 B' : 'Propagated labels must be used to redisplay the name.',
'MISRA AC SLSF 027 C' : 'passing through an inport must be labelled.',
'MISRA AC SLSF 027 D' : 'passing through an outport must be labelled.',
'MISRA AC SLSF 027 E' : 'originate from inside a re-useable subsystem must not labelled.',
'MISRA AC SLSF 027 G' : 'connected to Bus Creator, Goto, Mux, Subsystem, Stateflow Chart must be labelled.',
'MISRA AC SLSF 027 I' : 'Signal labels or propagated labels must be applied to busses with some conditions.',
'MISRA AC SLSF 027 J' : 'non-propagated labels must be unique.',
'MISRA AC SLSF 032 A' : ' port names must still be visible.',
'MISRA AC SLSF 034 A' : '"C-like bitwise operators" (& and |) must be enabled for all charts.',
'MISRA AC SLSF 034 C' : '"use strong data typing with Simulink I/O" is selected.',
'MISRA AC SLSF 034 D' : '"Execute (enter) Chart at Initialization" must be disabled.',
'MISRA AC SLSF 035 A' : 'The choice of state-chart or flow-chart is driven by the nature of the behaviour being modelled.',
'MISRA AC SLSF 035 B' : 'Truth tables must not be used.',
'MISRA AC SLSF 036 A' : 'Bus inputs are not permitted.',
'MISRA AC SLSF 036 C' : 'name of a Stateflow input/output must be the same as the corresponding signal label.',
'MISRA AC SLSF 037 A' : 'Must be defined at the chart level or below in the object hierarchy and not at the model level.',
'MISRA AC SLSF 037 B' : 'local data item name must not be used in different scopes within one state machine.',
'MISRA AC SLSF 037 G' : 'no unused data items.',
'MISRA AC SLSF 037 H' : 'must not be set to "Inherit: Same as Simulink".',
'MISRA AC SLSF 038 C' : 'C library functions must not be used in a state machine. ',
'MISRA AC SLSF 039 A' : ' a state must have either zero or more than one sub-state.',
'MISRA AC SLSF 040 B' : 'must not be used as a grouping mechanism',
'MISRA AC SLSF 040 D' : 'the order of the critical states must be documented in a textbox at the top level of the state machine, wherever critical.',
'MISRA AC SLSF 041 A' : 'must contain text only.',
'MISRA AC SLSF 042 A' : 'Super state containing exclusive states must have one default transition.',
'MISRA AC SLSF 042 B' : 'no more than one default transition',
'MISRA AC SLSF 042 C' : 'Top level of the state machine must not contain more than one default transitions.',
'MISRA AC SLSF 042 D' : 'inside a state chart must have ungaurded path to a state.',
'MISRA AC SLSF 042 E' : 'must not cross state boundaries',
'MISRA AC SLSF 043 A' : 'condition action and transition action must not be used in the same machine.',
'MISRA AC SLSF 043 D' : 'semi-colon at the end of each action.',
'MISRA AC SLSF 043 F' : 'no more than one internal transition from any state',
'MISRA AC SLSF 043 I' : 'one conditional transition must begin at every junction.',
'MISRA AC SLSF 043 J' : 'temporal logic must not be used.',
'MISRA AC SLSF 044 A' : 'during state actions must not be used.',
'MISRA AC SLSF 044 C' : 'In flow charts state actions must not be used.',
'MISRA AC SLSF 046 A' : 'History junction must not be used.',
'MISRA AC SLSF 047 A' : 'local , directed, broadcasted stateflow events, including all implicit eventsmust not be used.',
'MISRA AC SLSF 047 B' : 'output sateflows must be used only as outputs and not tested internally on transition conditions.',
'MISRA AC SLSF 048 A' : 'Matlab functions must not be called within state machine.',
'MISRA AC SLSF 048 B' : 'embedded MATLAB block must not be used.',
'MISRA AC SLSF 048 C' : 'c code within custom code tab needs to be just pre-processor directives.',
'MISRA AC SLSF 048 D' : 'pointers to be used only to call external functions.',
'MISRA AC SLSF 048 E' : 'custom code type needs to be converted to Mathworks type.',
'MISRA AC SLSF 048 F' : 'custom code must adhere to MISRA C',
'MISRA AC SLSF 048 G' : 'Numbers other than "0" and "1" must not appear on state machine.',
'MISRA AC SLSF 052 A' : 'must be unique within state machine.',
'MISRA AC SLSF 052 B' : 'same name as data should not be given in the chart.',
'MISRA AC SLSF 053 A' : 'transitions must not be drawn one upon the other.',
'MISRA AC SLSF 053 J' : 'must contain only one terminating junction.',
'MISRA AC SLSF 054 A' : 'above horizontal transitions and to the right of vertical transitions.',
'MISRA AC SLSF 055 A' : 'The order should be entry:, during: and exit: only.',
'HISL_0002 B' : 'Protect the second input of rem function from going to zero.',
'HISL_0002 A' : 'Protect the input of reciprocal function from going to zero.',
'HISL_0003 C' : 'Protect the input from going negative.',
'HISL_0004 A' : 'Protect the input from going negative.',
'HISL_0004 B' : 'Protect the input from equalling zero.',
'HISL_0005 A' : 'InElement-wise(.*) mode, protect all divisor inputs from going to zero.',
'HISL_0005 B' : 'In Matrix(*) mode, protect all divisor inputs from becoming singular input matrices.',
'HISL_0005 C' : 'Set the model configuration parameter Diagnostics > Data Validity > Signals > Division by singular matrix to error if Matrix(*) mode selected.',
'HISL_0008 B' : 'use a block that has a constant value for Iteration limit source, when source is external.',
'HISL_0010 A' : 'In the block parameter dialog box, select Show else condition.',
'HISL_0010 B' : 'Connect the outports of the If block to If Action Subsystem blocks.',
'HISL_0011 B' : 'Connect the outports of the Switch Case block to an Action Subsystem block.',
'HISL_0011 C' : 'Use an integer data type for the inputs to Switch Case blocks.',
'HISL_0012 B' : 'avoid using sample time-dependent blocks if the subsystem is called asynchronously',
'HISL_0013 A' : 'Configuration Parameters dialog box',
'HISL_0015 B' : 'Specify execution of the conditionally executed subsystems such that in all cases only one subsystem executes during a time step.',
'HISL_0015 C' : 'Clear the Merge block parameter Allow unequal port widths.',
'HISL_0021 A' : 'Use a consistent vector indexing method for all blocks. ',
'HISL_0022 A' : 'for index signals use integer or enum type.',
'HISL_0022 B' : 'type should cover the range of index.',
'HISL_0016 A' : 'Avoid comparisons using the == or ~= operator on floating-point data types.',
'HISL_0017 A' : 'Set the block Output data type parameter to Boolean.',
'HISL_0018 A' : 'Set the block Output data type parameter to Boolean.',
'HISL_0019 A' : 'Avoid signed integer data types as input to the block.',
'HISL_0019 B' : 'Choose an output data type that represents zero exactly.',
'HISF_0003 A' : 'Avoid signed integer data types as operands to the bitwise operations.',
'HISF_0010 A' : 'Avoid using these transitions.',
'HISF_0013 A' : 'Avoid creating transitions that cross from one parallel state to another.',
'HISF_0014 A' : 'Avoid transition paths that go into and out of a state without ending on a substate.',
'RP_0008' : 'Important Mask parameters of basic block should be displayed in their attribute format string.',
'RP_0012' : 'All signals entering and leaving a merge block should have matching name.',
'RP_0018' : 'input should not be boolean signals',
'RP_0021' : 'Width of signal inputs must be same.',
'RP_0028' : 'All events external to Stateflow should be a function call event.',
'RP_0036' : 'Transition from states must not depend on the implicit clockwise rule.',
'RP_0037' : 'Not permitted',
'RP_0046' : 'Not permitted',
'RP_0051' : 'Data types of signal inputs must be same.',
'RP_0054' : 'Allowed set of blocks are specified.',
'RP_0055' : 'Neither condition actions or transition actions should be used in transition between two states.',
'RP_0056' : 'Default shape and size should be used',
'RP_0057' : 'Name must be placed below',
'RP_0058' : 'must be name identical to corresponiding signal or bus name',
'RP_0059' : 'Shall be present at root level to detail revision history.',
'RP_0060' : 'Shall be present at root level to feature description.',
'RP_0061' : 'Look up method "Interpolation - Extrapolation" must not be used.',
'RP_0062' : 'All outputs from a feature must be displayed',
'RP_0063' : 'Global parmeters shall not be defined via Model Parameter Configuration Method.',
'RP_0064' : 'All signals and busses propagating from Blocks must be labelled with propagated signals.'
}
RuleCheckerInput = {
#TODO : Make it block type rather than property. (See rule in the spreadsheet)
'MISRA AC SLSF 005 B' : {'ResultType' : 'NotExist'
},
'MISRA AC SLSF 005 C' : {'Property' : 'DataStoreMemory',
'Model' : 'SIMULINK_BLOCK'},
'MISRA AC SLSF 006 A' : {'srchKeys' : {'BlockType':['Constant','DiscreteTransferFcn','DiscreteIntegrator','Gain','Lookup2D','Lookup','Relay','Saturate','Switch','UnitDelay','Reference'],'Name':'','SourceType':'Compare To Constant'},
'RuleInfo' : ['MANUAL CHECK RULE:check the Block Parameter value in Block:','that should not contain Expressions,Data Type Conversions,Selection of Rows and Columns.'],
'matchType' : 'Dynamic'
},
'MISRA AC SLSF 007 A' : {'PropChkData' : {'X0': '[]'},
'PropChkData1' : {'InitialOutput': '[]'},
'PropChkData2' : {'InitialCondition': '[]'},
'PropChkData3' : {'InitialStates': '[]'},
'UniqueKey' : ['BlockType', 'Name']
},
'MISRA AC SLSF 008 A' : {'PropChkData' : {'SaturateOnIntegerOverflow': 'off'},
'UniqueKey' : ['BlockType', 'Name']
},
'MISRA AC SLSF 008 B' : {'PropChkData' : {'RndMeth': 'Zero'},
'UniqueKey' : ['BlockType', 'Name'],
'ExcludeBlockLst' : ['Rounding']
},
'MISRA AC SLSF 009 B' : {'Property' : 'Priority',
'Model' : 'SIMULINK_BLOCK'},
'MISRA AC SLSF 009 D' : {'PropChkData1' : {'SampleTime': '-1'},
'UniqueKey' : ['BlockType', 'Name'],
'ExcludeBlockLst' : ['RateTransition', 'UnitDelay',
'DiscreteIntegrator', 'DiscreteTransferFcn',
'TriggerPort', 'Outport', 'Inport'],
'PropChkData2' : {'SystemSampleTime': '-1'},
'ListType' : 'Block',
'BlockType1' : 'SubSystem',
'BlockType2' : 'Reference',
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 011 A' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#'},
'DstInput' : {'BlockType' : 'If',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'BlockType',
'CheckValue' : 'If',
'CheckExp' : 'NOT EQUAL'}
},
'MISRA AC SLSF 011 B' : {'PropChkData' : {'ShowDefaultCase': 'on'},
'UniqueKey' : ['BlockType', 'Name']
},
'MISRA AC SLSF 012' : {
'UniqueKey' : ['BlockType', 'Name','Threshold'],
'PropChkData' : {
'SourceProp' : 'Criteria',
},
'ResultMatchType' : 'Match'
},
'MISRA AC SLSF 013 A' : {'ListType' : 'Block',
'BlockType' : 'MultiPortSwitch',
'PropChkData' : {'Inputs': 1},
'UniqueKey' : ['BlockType', 'Name'],
'ResultMatchType' : 'Greater'
},
'MISRA AC SLSF 013 C' : {'srchKeys' : {'BlockType':'MultiPortSwitch','Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the control input of MultiPortSwitch Block in:','that value should be greater than or equal to one and not exceed the number of switched inputs.'],
'matchType' : 'blockExist'
},
'MISRA AC SLSF 016 A' : {'matchType' :'Match'
},
'MISRA AC SLSF 016 B' : {'matchType' :'Exist'
},
'MISRA AC SLSF 016 C' : {'matchType' :'NameExist',
'UniqueKey' :{'BlockType':'BusCreator'}
},
'MISRA AC SLSF 016 E' : {'matchType' :'NotExist'
},
'MISRA AC SLSF 017 A' : {'ListType' : ['Block','Line',],
'AllowedBlock' :[['Inport','From','Ground','Constant'], #only output Blocks
['Goto','Outport','Terminator'], #only input Blocks
['BusCreator','BusSelector','Mux','Demux','Merge','If','SwitchCase',
'Concatenate','Reference','Sum','Product','MinMax','Trigonometry',
'Logic','RelationalOperator','Saturate','DiscreteTransferFcn',
'TriggerPort','Selector','Math','MultiPortSwitch'
], # 2D ports,which may vary.
['Lookup','Sqrt','Abs','Gain','UnitDelay','Relay','RateTransition','DataTypeConversion'],#2D vector,fixied size.
{'Lookup2D':[2,1],
'Switch':[3,1]
},
['SubSystem'],
['DiscreteIntegrator']
]
},
'MISRA AC SLSF 017 B' : {'ListType' : 'Line',
'PropChkData' : {'SrcBlock': '',
'DstBlock':'',
},
'ResultMatchType' : 'Any'
},
'MISRA AC SLSF 018 A' : {'PropData' : {'BlockType': 'Goto'},
'CheckListData' : {'TagVisibility': 'local'},
'ListType' : 'Block',
'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'From',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'Goto',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 018 B' : {'PropData' : {'BlockType': 'Goto'},
'ListType' : ['Block','Line', 'Port'],
'UniqueKey' : ['BlockType', 'Name', 'PropagatedSignals'],
'PropChkData' : {'SourceBlockType': 'From',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'Goto',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 018 C' : {'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'Goto',
'SourceProp' : 'GotoTag'}
},
'MISRA AC SLSF 018 D' : {'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'Goto',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'From',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Exist'
},
'MISRA AC SLSF 018 E' : {'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'From',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'Goto',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Unique'
},
'MISRA AC SLSF 027 A' : {'ListType' : 'SrcBlock',
'AllowedBlock' :[['Inport','From','Ground','Constant',
'Lookup','Sqrt','Abs','Gain','UnitDelay','Relay','RateTransition','DataTypeConversion',
'Lookup2D','Switch'
],
['BusCreator','BusSelector','Mux','Demux','Merge','If','SwitchCase',
'Concatenate','Reference','Sum','Product','MinMax','Trigonometry',
'Logic','RelationalOperator','Saturate','DiscreteTransferFcn',
'TriggerPort','Selector','Math','MultiPortSwitch'
], # 2D ports,which may vary.
['DiscreteIntegrator']
]
},
'MISRA AC SLSF 027 C' : { 'ListType' : ['Line'],
'PropChkData' : {'BlockType':'Inport',
'SourceProp':'SrcBlock'},
'ResultMatchType' : 'Inport'
},
'MISRA AC SLSF 027 D' : { 'ListType' : ['Line'],
'PropChkData' : {'BlockType':'Outport',
'SourceProp':'DstBlock'},
'ResultMatchType' : 'Outport'
},
'MISRA AC SLSF 027 E' : {'srchKeys' : ['BlockType', 'SourceType','Name', 'MaskType'],
'PropChkData' : ['SrcBlock', 'Name']
},
'MISRA AC SLSF 027 G' : {
'matchType' :'NameExist',
'UniqueKey' :{'BlockType':'BusCreator'},
'UniqueKey2' : ['BlockType'],
'PropChkData' : {'SourceProp' : 'Name',
'BlockProp' : 'DstBlock'
},
'ResultMatchType' : 'Exist2',
'AllowedBlock' :['Mux','Goto','SubSystem']
},
'MISRA AC SLSF 027 I' : { 'ListType' : ['Line'],
'PropChkData' : {'BlockType':'BusCreator',
'SourceProp':'SrcBlock'},
'PropChkData1' : {'BlockType':'BusCreator',
'SourceProp':'DstBlock'},
'PropChkData2' : {'BlockType':'BusSelector',
'SourceProp':'SrcBlock'},
'PropChkData3' : {'BlockType':'BusSelector',
'SourceProp':'DstBlock'},
'ResultMatchType' : 'Exist'
},
'MISRA AC SLSF 027 J' : {'PropChkData' : {'SourceBlockType': 'Line',
'SourceProp' : 'Name'}
},
'MISRA AC SLSF 034 A' : {'ListType' : 'chart',
'PropChkData' : {'actionLanguage': 1},
'ResultMatchType' : 'Exact',
'ListFoundCheck' : 'FAIL',
'PropFoundCheck' : 'TRUE'
},
'MISRA AC SLSF 034 C' : {'ListType' : 'chart',
'PropChkData' : {'disableImplicitCasting': 1},
'ResultMatchType' : 'Exact',
'ListFoundCheck' : 'FAIL',
'PropFoundCheck' : 'TRUE'
},
'MISRA AC SLSF 034 D' : {'ListType' : 'chart',
'PropChkData' : {'executeAtInitialization': 0},
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 035 B' : {'Property' : 'truthTable',
'Model' : 'STATEFLOW'
},
'MISRA AC SLSF 036 A' : {'srchKeys' :{'LineSrchKeys':['SrcBlock','Name'],
'BlckSrchKeys':['OutDataTypeStr','Name'],
'chartSrchKeys':['Name','Ports','MaskType','MaskDescription']
}
},
'MISRA AC SLSF 036 C' : {'srchKeys' :{'BlckSrchKeys':['BlockType','Name','Port'],
'chartSrchKeys':['Name','Ports','MaskType','MaskDescription']
}
},
'MISRA AC SLSF 037 G' : {'PropChkData' : {'SourceBlockType': 'data',
'SourceProp' : 'name',
'DestBlockType' : 'state',
'DestProp' : 'labelString',
'DestProp1' : 'labelString'
},
},
'MISRA AC SLSF 037 H' : {'ListType' : 'data',
'PropChkData' : {'dataType': 'Inherit: Same as Simulink'},
'ResultMatchType' : 'Opposite'
},
'MISRA AC SLSF 039 A' : {'ResultType' : 'Exist'
},
'MISRA AC SLSF 041 A' : {'ListType' : 'state',
'PropChkData' : {'type': 'GROUP_STATE'},
'ResultMatchType' : 'Text'
},
'MISRA AC SLSF 042 A' : {
'resultType' :'Exist'
},
'MISRA AC SLSF 042 B' : {
'resultType' :'Single'
},
'MISRA AC SLSF 042 C' : {
'resultType' :'DefaultAtTop'
},
'MISRA AC SLSF 042 D' : {
'resultType' :'Unguarded_Exist'
},
'MISRA AC SLSF 042 E' : {
'resultType' :'DefaultTx_Exist'
},
'MISRA AC SLSF 043 D' : {
'ChkData' : ';'
},
'MISRA AC SLSF 043 A' : {
'srchKeys' : ['labelString','chart']
},
'MISRA AC SLSF 043 I' : {
'resultType' :'Unguarded_Exist'
},
'MISRA AC SLSF 043 J' : {
'ChkData' : ['after', 'before', 'at', 'every', 'temporalCount']
},
'MISRA AC SLSF 044 A' : {'ListType' : 'state',
'PropChkData' : {'labelString': ['during:', 'du:']},
'ResultMatchType' : 'Contains'
},
'MISRA AC SLSF 044 C' : {
'ChkData' : ';'
},
'MISRA AC SLSF 046 A' : {'ListType' : 'junction',
'PropChkData' : {'type': 'HISTORY_JUNCTION'},
'ResultMatchType' : 'Opposite'
},
'MISRA AC SLSF 048 A' : {'Property' : 'MATLABFcn',
'Model' : 'SIMULINK_BLOCK'
},
'MISRA AC SLSF 048 B' : {'ResultType' : 'Exist',
},
'MISRA AC SLSF 048 C' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:C Code with in the custom code tab must be limited to preprocessor statements'],
},
'MISRA AC SLSF 048 D' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:Pointers must not be used except when they required to call an external function'],
},
'MISRA AC SLSF 048 E' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:Custom code variables must be restricted to fixied width word size datatypes 1)signed 8,16,32 integers(int8_T,int16_T,int32_T) 2)unsigned 8,16,32 integers(uint8_T,uint16_T,uint32_T) 3)32 and 64 bit floating point number(real32_T,real64_T) 4)Bolean(boolean_T)'],
},
'MISRA AC SLSF 048 F' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:LDRA Tool checks the MISRA C standards for used custom code,so check the LDRA tool reports'],
},
'MISRA AC SLSF 048 G' : {'ListType' : 'state',
'ListType1' : 'transition',
'PropChkData' : {'labelString': ['0', '1']},
'ResultMatchType' : 'Otherthan',
'ListFoundCheck' : 'PASS',
'PropFoundCheck' : 'FALSE'
},
'MISRA AC SLSF 052 A' : {'PropChkData' : {'SourceBlockType': 'state',
'SourceProp' : 'labelString'}
},
'MISRA AC SLSF 052 B' : {'PropChkData' : {'SourceBlockType': 'data',
'SourceProp' : 'name',
'DestBlockType' : 'state',
'DestProp' : 'labelString'
},
'CheckType' : 'Unique'
},
'MISRA AC SLSF 053 A' : {
'resultType' :'NotExist'
},
'HISL_0002 A' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#',
'OutMin' : '#ValueKey#'},
'DstInput' : {'BlockType' : 'Math',
'Operator': 'reciprocal',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckExp' : 'MANUAL'}
},
'HISL_0002 B' : {'srchKeys' : {'BlockType' : 'Math',
'Operator' : ['rem'],
'Name' : ''
},
'RuleInfo' :['MANUAL CHECK RULE:check the second input of the rem Block in:','If it is zero,then this rule will fail'],
'matchType' :'MathExist'
},
'HISL_0003 C' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#',
'OutMin' : '#ValueKey#'},
'DstInput' : {'BlockType' : 'Sqrt',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'OutMin',
'CheckValue' : 0,
'CheckExp' : 'GREATER/EQUAL'}
},
'HISL_0004 A' : {'srchKeys' : {'BlockType' : 'Math',
'Operator' : ['log','log10'],
'Name' : ''
},
'RuleInfo' :['MANUAL CHECK RULE:check the input of the logarithm Block in:','If it is negative,then this rule will fail'],
'matchType' :'MathExist'
},
'HISL_0004 B' : {'srchKeys' : {'BlockType' : 'Math',
'Operator' : ['log','log10'],
'Name' : ''
},
'RuleInfo' :['MANUAL CHECK RULE:check the input of the logarithm Block in:','If it is zero,then this rule will fail'],
'matchType' :'MathExist'
},
'HISL_0005 A' : {'srchKeys' : {'BlockType':'Product','Multiplication':'','Name':'','Inputs':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input of divisor port in Product Block in:','If it is zero,then this rule will fail'],
'matchType' : 'ProductExist'
},
'HISL_0005 B' : {'srchKeys' :{'BlockType':'Product','Multiplication':'','Name':'','Inputs':''},
'RuleInfo' :['MANUAL CHECK RULE:check the input signal of the divisor port in Product Block in:','If it is singular input matrices,then this rule will fail'],
'matchType' :'ProductExist'
},
'HISL_0010 A' : {'PropChkData' : {'ShowElse': 'on'},
'UniqueKey' : ['BlockType', 'Name','ElseIfExpressions'],
'ListType' : 'Block',
'BlockType' : 'If',
'ResultMatchType' : 'Exact'
},
'HISL_0010 B' : {'PropChkData' : {'DstPort': 'ifaction'},
'blockType' : 'IfExist',
'UniqueKey' : ['BlockType', 'Name','Ports','ElseIfExpressions']
},
'HISL_0011 B' : {'PropChkData' : {'DstPort': 'ifaction'},
'blockType' : 'SwitchCaseExist',
'UniqueKey' : ['BlockType', 'Name','Ports']
},
'HISL_0011 C' : {'srchKeys' : {'BlockType':'SwitchCase','Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input of SwitchCase Block in:','If it is not a integer datatype,then this rule will fail'],
'matchType' : 'blockExist'
},
'HISL_0015 B' : {'srchKeys' : {'BlockType':'Merge','Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:If two or more inputs of Merge Block in:',' are coming from conditionally excuted subSystems, then such inputs must have mutual exclusion between the conditionally executed subsystems feeding a Merge block'],
'matchType' : 'blockExist'
},
'HISL_0015 C' : {'PropChkData' : {'AllowUnequalInputPortWidths': 'off'},
'UniqueKey' : ['BlockType', 'Name'],
'ListType' : 'Block',
'BlockType' : 'Merge',
'ResultMatchType' : 'Exact'
},
'HISL_0016 A' : {'srchKeys' : {'BlockType':'RelationalOperator','Operator':['==','~='],'Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input signals of RelationalOperator Block in:','If input signals are float type,then this rule will fail'],
'matchType' : 'Exist'
},
'HISL_0017 A' : {
'UniqueKey' : {'BlockType':'RelationalOperator'},
'SrchKeys' : ['BlockType','OutDataTypeStr','Name']
},
'HISL_0018 A' : {
'UniqueKey' : {'BlockType':'Logic'},
'SrchKeys' : ['BlockType','OutDataTypeStr','Name']
},
'HISL_0019 A' : {'srchKeys' : {'BlockType':'Reference','SourceType':['Bitwise Operator'],'Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input signals of Bitwise Operator Block in:','If input signals are signed integer data type,then this rule will fail'],
'matchType' : 'Exist'
},
'HISL_0019 B' : {'PropData' : {'BlockType': 'Reference',
'SourceType': 'Bitwise Operator'},
'CheckListData' : {'BitMaskRealWorld' : 'Stored Integer'},
'ListType' : 'Block'
},
'HISF_0003 A' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#',
'OutDataTypeStr' : '#ValueKey#'},
'DstInput' : {'BlockType' : 'Reference',
'SourceType': 'Bitwise Operator',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'OutDataTypeStr',
'CheckValue' : ['uint8', 'uint16','uint32'],
'CheckExp' : 'WITHIN'}
},
'RP_0012' : {
'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {
'SourceProp' : 'Ports',
},
'ResultMatchType' : 'Unique'
},
'RP_0018' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#'},
'DstInput' : {'BlockType' : 'RelationalOperator',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'OutDataTypeStr',
'CheckValue' : 'Boolean',
'CheckExp' : 'NOT EQUAL'}
},
'RP_0021' : {'PropChkData' : {'AllowDiffInputSizes': 'off'},
'UniqueKey' : ['BlockType', 'Name'],
'ListType' : 'Block',
'BlockType' : 'Switch',
'ResultMatchType' : 'Exact'
},
'RP_0028' : {'srchKeys_chart' :['id','name'],
'srchKeys_event' :['name','linkNode','scope','trigger']
},
'RP_0036' : {'ListType' : 'chart',
'PropChkData' : {'userSpecifiedStateTransitionExecutionOrder': 1},
'ResultMatchType' : 'Exact',
'ListFoundCheck' : 'FAIL',
'PropFoundCheck' : 'TRUE'
},
'RP_0037' : {
'ChkData' : ';'
},
'RP_0046' : {'ListType' : 'state',
'ListType1' : 'transition',
'PropChkData' : {'labelString': []},
'ResultMatchType' : 'DoesNotContain',
},
'RP_0051' : {'PropChkData' : {'InputSameDT': 'on'},
'UniqueKey' : ['BlockType', 'Name'],
'ListType' : 'Block',
'BlockType1' : 'Switch',
'BlockType2' : 'MultiPortSwitch',
'ResultMatchType' : 'Exact'
},
'RP_0055' : {'ListType' : 'transition',
'PropChkData' : {'labelString': [';']},
'ResultMatchType' : 'Contains',
'ListFoundCheck' : 'PASS',
'PropFoundCheck' : 'TRUE'
},
'RP_0057' : {'Property' : 'NamePlacement',
'Model' : 'SIMULINK_BLOCK'
},
'RP_0058' : { 'matchType' :'Exact',
'UniqueKey' :{'BlockType':'Inport'},
'UniqueKey2' :{'BlockType':'Outport'}
},
'RP_0059' : {'SrchKeys' : ['BlockType','SourceType','ECoderFlag'],
'PropChkData' : {'ECoderFlag': 'History'}
},
'RP_0060' : {'SrchKeys' : ['BlockType','SourceType','ECoderFlag'],
'PropChkData' : {'ECoderFlag': 'Description'}
},
'RP_0061' : {'PropChkData' : {'LookUpMeth': 'Interpolation-Extrapolation'},
'UniqueKey' : ['BlockType', 'Name']
},
'RP_0063' : {'Property' : 'TunableVars',
'Model' : 'SIMULINK_MODEL'
},
'RP_0064' : {
'UniqueKey' : ['BlockType'],
'PropChkData' : {'SourceProp' : 'Name',
'BlockProp' : 'SrcBlock'
},
'ResultMatchType' : 'Exist',
'AllowedBlock' : ['From','SubSystem','Demux','Selector']
}
}
configReferenceFiles = {
'mdlref10ms_cs' : 'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_10ms_mdlref_config_set.m',
'context10ms_cs' : 'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_10ms_context_config_set.m',
'mdlref50ms_cs' : 'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_50ms_mdlref_config_set.m',
'context50ms_cs' : 'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_50ms_context_config_set.m',
'lib_cs' : 'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_libraryMdlRef_config_set.m'}
| [
[
[
2,
16
]
],
[
[
11716,
11727
]
],
[
[
12268,
12286
]
],
[
[
14616,
14638
]
],
[
[
15073,
15100
]
],
[
[
15467,
15489
]
],
[
[
16267,
16289
]
],
[
[
17867,
17882
]
],
[
[
18130,
18141
]
],
[
[
31717,
31733
]
],
[
[
73232,
73252
]
]
] |
# Телеграм-бот v.002 - бот создаёт меню, присылает собачку, и анекдот
import telebot # pyTelegramBotAPI 4.3.1
from telebot import types
import requests
import bs4
bot = telebot.TeleBot('5105972662:AAG24fr382U1_hosO4Zrb-tv_BTakAV1MPk') # Создаем экземпляр бота
# -----------------------------------------------------------------------
# Функция, обрабатывающая команду /start
@bot.message_handler(commands=["start"])
def start(message, res=False):
chat_id = message.chat.id
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("👋 Главное меню")
btn2 = types.KeyboardButton("❓ Помощь")
markup.add(btn1, btn2)
bot.send_message(chat_id,
text="Привет, {0.first_name}! Я тестовый бот для курса программирования на языке ПаЙтон".format(
message.from_user), reply_markup=markup)
# -----------------------------------------------------------------------
# Получение сообщений от юзера
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
chat_id = message.chat.id
ms_text = message.text
if ms_text == "Главное меню" or ms_text == "👋 Главное меню" or ms_text == "Вернуться в главное меню": # ..........
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Развлечения")
btn2 = types.KeyboardButton("WEB-камера")
btn3 = types.KeyboardButton("Управление")
back = types.KeyboardButton("Помощь")
markup.add(btn1, btn2, btn3, back)
bot.send_message(chat_id, text="Вы в главном меню", reply_markup=markup)
elif ms_text == "Развлечения": # ..................................................................................
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Картиночки с котиками")
btn2 = types.KeyboardButton("Анекдоты")
btn3 = types.KeyboardButton("Картиночки с собачками")
btn4 = types.KeyboardButton("Играть в камень-ножницы-бумага")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, btn4, back)
bot.send_message(chat_id, text="Развлечения", reply_markup=markup)
# ..............................................................................
elif ms_text == "/cat" or ms_text == "Картиночки с котиками":
contents = requests.get('https://random.cat/meow.json').json()
urlCAT = contents['url']
bot.send_photo(chat_id, photo=urlCAT, caption="Держи котика!")
# ..............................................................................
elif ms_text == "Анекдоты":
bot.send_message(chat_id, text="еще не готово...")
# .............................................................................
elif ms_text == "/dog" or ms_text == "Картиночки с собачками":
contents = requests.get('https://random.dog/woof.json').json()
urlDOG = contents['url']
bot.send_photo(chat_id, photo=urlDOG, caption="Держи собатьку!")
#..............................................................................
elif ms_text == "Играть в камень-ножницы-бумага":
bot.send_message(chat_id, text="еще не готово...")
elif ms_text == "WEB-камера": # .............................................................................
bot.send_message(chat_id, text="еще не готово...")
elif ms_text == "Управление": # ...................................................................................
bot.send_message(chat_id, text="еще не готово...")
elif ms_text == "Помощь" or ms_text == "/help": # .................................................................
bot.send_message(chat_id, "Автор: Панасенко Софья, 1-МД-5")
key1 = types.InlineKeyboardMarkup()
btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://t.me/ave_satanas_bitch")
key1.add(btn1)
img = open('author.jpg', 'rb')
bot.send_photo(message.chat.id, img, reply_markup=key1)
else: # ...........................................................................................................
bot.send_message(chat_id, text="Я тебя слышу!!! Ваше сообщение: " + ms_text)
# -----------------------------------------------------------------------
bot.polling(none_stop=True, interval=0) # Запускаем бота
print()
| [
[
[
80,
87
],
[
179,
186
]
],
[
[
135,
140
],
[
511,
516
],
[
571,
576
],
[
622,
627
],
[
1292,
1297
],
[
1356,
1361
],
[
1408,
1413
],
[
1459,
1464
],
[
1510,
1515
],
[
1809,
1814
],
[
1873,
1878
],
[
1935,
1940
],
[
1984,
1989
],
[
2047,
2052
],
[
2118,
2123
],
[
3892,
3897
],
[
3937,
3942
]
],
[
[
149,
157
],
[
2462,
2470
],
[
2964,
2972
]
],
[
[
166,
169
]
],
[
[
173,
176
],
[
392,
395
],
[
1015,
1018
],
[
4443,
4446
],
[
690,
693
],
[
1594,
1597
],
[
2226,
2229
],
[
2557,
2560
],
[
2744,
2747
],
[
3059,
3062
],
[
3269,
3272
],
[
3448,
3451
],
[
3632,
3635
],
[
3816,
3819
],
[
4099,
4102
],
[
4288,
4291
]
],
[
[
437,
442
]
],
[
[
1064,
1081
]
]
] |
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 28-Aug-2017
# @Email: valle.mrv@gmail.com
# @Filename: models.py
# @Last modified by: valle
# @Last modified time: 15-Feb-2018
# @License: Apache license vesion 2.0
from __future__ import unicode_literals
from django.db.models import Q
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from adminshop.models import (Clientes, Direcciones, Proveedores,
Productos, Presupuesto)
# Create your models here.
CHOICES_TIPO_PAGO = (
('EF', 'Efectivo'),
('TJ', 'Tarjeta'),
('TB', 'Transferencia bancaria'),
('PY', 'Paypal'),
('CR', 'Contrarembolso'),
)
CHOICES_TIPO_VENDEDOR = (
('CL', 'Cliente'),
('PV', 'Proveedor'),
('NO', 'No asignado')
)
CHOICES_TIPO_DOC = (
('CP', 'Compra'),
('FT', 'Factura'),
('RP', 'Reparacion'),
('AB', 'Abono'),
('OS', 'Testeo')
)
class DocumentSendPolice(models.Model):
fecha_creado = models.DateTimeField(auto_now_add=True)
enviado = models.BooleanField(default=False)
intervalo = models.CharField(max_length=25)
class Meta:
ordering = ["-fecha_creado"]
class DocumentSendGestoria(models.Model):
fecha_creado = models.DateTimeField(auto_now_add=True)
enviado = models.BooleanField(default=False)
intervalo = models.CharField(max_length=25)
class Meta:
ordering = ["-fecha_creado"]
class DocumentoTesteo(models.Model):
cliente = models.ForeignKey("clientes", on_delete=models.CASCADE )
producto = models.ForeignKey("Productos", on_delete=models.CASCADE )
empleado = models.ForeignKey(User, on_delete=models.CASCADE )
firma = models.FileField(upload_to='firmas', null=True)
frimado = models.BooleanField(default=False)
fecha = models.DateTimeField(auto_now=True)
def __unicode__(self):
return str(self.cliente)
class Meta:
ordering = ["-id"]
class ConfigSite(models.Model):
ISP = models.IntegerField(blank=True, default=21)
email_policia = models.EmailField(max_length=100, blank=True)
email_gestoria = models.EmailField(max_length=100, blank=True)
codigo_compra = models.IntegerField("Inicio contador", default=3023)
firma_tienda = models.FileField(upload_to='config', blank=True)
logo_tienda = models.FileField(upload_to='config', blank=True)
class Compras(models.Model):
vendedor_id = models.IntegerField(null=True)
producto = models.ForeignKey("Productos", on_delete=models.CASCADE)
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
fecha_entrada = models.DateTimeField(auto_now_add=True)
codigo_compra = models.CharField(max_length=150, null=True)
firma = models.FileField(upload_to='firmas', null=True)
tipo_compra = models.CharField(max_length=4, default="REBU", choices=[("REBU","REBU"), ("ISP","ISP")])
doc_proveedor = models.FileField(upload_to='doc_proveedor', null=True, default=None, max_length=500)
enviar_policia = models.BooleanField("Enviar a la policia", blank=True, default=True)
tipo_vendedor = models.CharField(
max_length=2,
choices=CHOICES_TIPO_VENDEDOR,
default="NO",
)
def set_vendedor(self, vendedor):
if vendedor != None:
self.vendedor_id = vendedor.id
if type(vendedor) == Clientes:
self.tipo_vendedor = "CL"
else:
self.tipo_vendedor = "PV"
else:
self.tipo_vendedor = "NO"
def get_vendedor(self):
if self.tipo_vendedor == "CL":
clientes = Clientes.objects.filter(Q(pk=self.vendedor_id))
if len(clientes) > 0:
cliente = clientes[0]
vendedor = {}
vendedor["DNI"] = cliente.DNI
vendedor["nombre"] = cliente.nombre_completo
direcciones = Direcciones.objects.filter(cliente_id=self.vendedor_id)
if len(direcciones) > 0:
direccion = direcciones[0]
else:
direccion = ""
vendedor["direccion"] = direccion
vendedor["telefono"] = cliente.telefono
vendedor["email"] = cliente.email
vendedor["id"] = cliente.id
return vendedor
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
elif self.tipo_vendedor == "PV":
ps = Proveedores.objects.filter(Q(pk=self.vendedor_id))
if len(ps) > 0:
p = ps[0]
vendedor = {}
vendedor["DNI"] = p.CIF
vendedor["nombre"] = p.razon_social
vendedor["direccion"] = p.direccion
vendedor["telefono"] = p.telefono
vendedor["email"] = p.email
vendedor["id"] = p.id
return vendedor
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
def save(self, *args, **kwargs):
super(Compras, self).save()
if self.codigo_compra == None:
self.codigo_compra = ConfigSite.objects.all()[0].codigo_compra+self.pk
super(Compras, self).save()
class Meta:
ordering= ["-id"]
class Ventas(models.Model):
cliente = models.ForeignKey("Clientes", on_delete=models.SET_NULL, null=True)
empleado = models.CharField(max_length=150)
empleado_id = models.IntegerField(default=-1)
fecha_salida= models.DateTimeField(auto_now_add=True)
firma = models.FileField(upload_to='firmas', null=True)
entrega = models.DecimalField(max_digits=10, decimal_places=2, default=0)
forma_pago = models.CharField(
max_length=2,
choices=CHOICES_TIPO_PAGO,
default="EF",
)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
class Meta:
ordering = ['-fecha_salida']
class LineasVentas(models.Model):
venta = models.ForeignKey("Ventas", on_delete=models.CASCADE)
detalle = models.CharField(max_length=150)
codigo_compra = models.CharField(max_length=150)
ns_imei = models.CharField(max_length=150)
descuento = models.DecimalField(max_digits=6, decimal_places=2)
can = models.IntegerField()
p_unidad = models.DecimalField(max_digits=10, decimal_places=2)
class Abonos(models.Model):
factura = models.ForeignKey("Ventas", on_delete=models.CASCADE)
cliente = models.ForeignKey("Clientes", on_delete=models.SET_NULL, null=True)
empleado = models.CharField(max_length=150)
empleado_id = models.IntegerField(default=-1)
fecha_salida= models.DateTimeField(auto_now_add=True)
firma = models.FileField(upload_to='firmas', null=True)
forma_pago = models.CharField(
max_length=2,
choices=CHOICES_TIPO_PAGO,
default="EF",
)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
class Meta:
ordering = ['-fecha_salida']
class LineasAbonos(models.Model):
abono = models.ForeignKey("Abonos", on_delete=models.CASCADE)
detalle = models.CharField(max_length=150)
codigo_compra = models.CharField(max_length=150)
ns_imei = models.CharField(max_length=150)
descuento = models.DecimalField(max_digits=5, decimal_places=2)
can = models.IntegerField()
p_unidad = models.DecimalField(max_digits=10, decimal_places=2)
class Historial(models.Model):
cliente = models.ForeignKey("Clientes", on_delete=models.CASCADE)
producto = models.ForeignKey("Productos", on_delete=models.CASCADE)
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
fecha = models.DateTimeField(auto_now_add=True)
detalle = models.CharField(max_length=150)
def __unicode__(self):
return self.detalle
class Meta:
ordering = ["-id"]
class Firmas(models.Model):
tipo_documento = models.CharField(
max_length=2,
choices=CHOICES_TIPO_DOC,
default="CP",
)
empleado_id = models.IntegerField()
documento_id = models.IntegerField()
fecha = models.DateTimeField(auto_now=True)
firmado = models.BooleanField(default=False)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
def get_nombre_cliente(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
vendedor = compra.get_vendedor()
except:
vendedor = { "nombre": "Documento borrado"}
return vendedor["nombre"]
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
cliente = p.cliente.nombre_completo
except:
cliente = "Documento borrado"
return cliente
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
cliente = p.cliente
return cliente.nombre_completo
def get_ns_imei(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
return compra.producto.ns_imei
except:
return "Documento borrado"
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
ns_imei = p.producto.ns_imei
except:
ns_imei = "Documento borrado"
return ns_imei
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
return p.producto.ns_imei
def get_producto_pk(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
return compra.producto.id
except:
return 0
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
ns_imei = p.producto.id
except:
ns_imei = 0
return ns_imei
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
return p.producto.pk
def get_documento(self):
if self.tipo_documento == "CP":
compra = Compras.objects.get(pk=self.documento_id)
vendedor = compra.get_vendedor()
datos_send= {
"pk": compra.pk,
"id_producto": compra.producto.pk,
'nombre': vendedor["nombre"],
"DNI": vendedor["DNI"],
"ns_imei": compra.producto.ns_imei,
"precio_compra": str(compra.producto.precio_compra),
}
return "tienda/sign/sign_compras.html", datos_send
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
cliente = p.cliente
datos_send= {
"pk": p.pk,
"id_producto": p.producto.pk,
'nombre': cliente.nombre_completo,
"DNI": cliente.DNI,
"ns_imei": p.producto.ns_imei,
}
return "tienda/sign/sign_reparacion.html", datos_send
except:
self.delete()
return None, None
elif self.tipo_documento == "OS":
try:
p = DocumentoTesteo.objects.get(pk=self.documento_id)
cliente = p.cliente
datos_send= {
"pk": p.pk,
"id_producto": p.producto.pk,
'nombre': cliente.nombre_completo,
"DNI": cliente.DNI,
"ns_imei": p.producto.ns_imei,
}
return "tienda/sign/sign_testeo.html", datos_send
except:
self.delete()
return None, None
class Meta:
ordering = ["-fecha"]
| [
[
[
265,
281
]
],
[
[
311,
312
],
[
3665,
3666
],
[
4564,
4565
]
],
[
[
335,
341
],
[
992,
998
],
[
1026,
1032
],
[
1080,
1086
],
[
1131,
1137
],
[
1245,
1251
],
[
1279,
1285
],
[
1333,
1339
],
[
1384,
1390
],
[
1494,
1500
],
[
1523,
1529
],
[
1563,
1569
],
[
1595,
1601
],
[
1637,
1643
],
[
1669,
1675
],
[
1704,
1710
],
[
1733,
1739
],
[
1795,
1801
],
[
1842,
1848
],
[
2001,
2007
],
[
2026,
2032
],
[
2090,
2096
],
[
2157,
2163
],
[
2223,
2229
],
[
2295,
2301
],
[
2362,
2368
],
[
2427,
2433
],
[
2460,
2466
],
[
2506,
2512
],
[
2547,
2553
],
[
2578,
2584
],
[
2612,
2618
],
[
2648,
2654
],
[
2708,
2714
],
[
2764,
2770
],
[
2830,
2836
],
[
2939,
2945
],
[
3045,
3051
],
[
3135,
3141
],
[
5498,
5504
],
[
5527,
5533
],
[
5567,
5573
],
[
5610,
5616
],
[
5661,
5667
],
[
5711,
5717
],
[
5763,
5769
],
[
5825,
5831
],
[
5906,
5912
],
[
6272,
6278
],
[
6299,
6305
],
[
6337,
6343
],
[
6367,
6373
],
[
6420,
6426
],
[
6467,
6473
],
[
6516,
6522
],
[
6578,
6584
],
[
6615,
6621
],
[
6684,
6690
],
[
6713,
6719
],
[
6751,
6757
],
[
6781,
6787
],
[
6821,
6827
],
[
6864,
6870
],
[
6915,
6921
],
[
6965,
6971
],
[
7017,
7023
],
[
7082,
7088
],
[
7448,
7454
],
[
7475,
7481
],
[
7513,
7519
],
[
7543,
7549
],
[
7596,
7602
],
[
7643,
7649
],
[
7692,
7698
],
[
7754,
7760
],
[
7791,
7797
],
[
7864,
7870
],
[
7893,
7899
],
[
7933,
7939
],
[
7964,
7970
],
[
8005,
8011
],
[
8036,
8042
],
[
8070,
8076
],
[
8098,
8104
],
[
8152,
8158
],
[
8300,
8306
],
[
8336,
8342
],
[
8456,
8462
],
[
8497,
8503
],
[
8531,
8537
],
[
8581,
8587
]
],
[
[
381,
385
],
[
1687,
1691
],
[
2596,
2600
],
[
8054,
8058
],
[
6054,
6058
],
[
6191,
6195
],
[
7230,
7234
],
[
7367,
7371
],
[
8661,
8665
],
[
8798,
8802
]
],
[
[
407,
415
]
],
[
[
446,
454
],
[
3386,
3394
],
[
3641,
3649
]
],
[
[
456,
467
],
[
3928,
3939
]
],
[
[
469,
480
],
[
4537,
4548
]
],
[
[
512,
521
]
],
[
[
523,
534
],
[
9211,
9222
],
[
9928,
9939
],
[
10582,
10593
],
[
11538,
11549
]
],
[
[
564,
581
],
[
5962,
5979
],
[
7138,
7155
]
],
[
[
726,
747
],
[
3191,
3212
]
],
[
[
829,
845
],
[
8392,
8408
]
],
[
[
973,
991
]
],
[
[
1224,
1244
]
],
[
[
1478,
1493
],
[
9462,
9477
],
[
10172,
10187
],
[
10803,
10818
],
[
12129,
12144
]
],
[
[
1990,
2000
],
[
5354,
5364
]
],
[
[
2419,
2426
],
[
5260,
5267
],
[
5418,
5425
],
[
8922,
8929
],
[
9697,
9704
],
[
10374,
10381
],
[
10978,
10985
]
],
[
[
5491,
5497
]
],
[
[
6259,
6271
]
],
[
[
6677,
6683
]
],
[
[
7435,
7447
]
],
[
[
7854,
7863
]
],
[
[
8293,
8299
]
]
] |
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs42_detached_award_financial_assistance_1'
def test_column_headers(database):
expected_subset = {'row_number', 'place_of_performance_forei', 'place_of_perform_country_c', 'record_type',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test PrimaryPlaceOfPerformanceForeignLocationDescription is required for foreign places of performance
(i.e., when PrimaryPlaceOfPerformanceCountryCode does not equal USA) for record type 2. This test shouldn't
care about content when country_code is USA (that is for another validation).
"""
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='description',
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='')
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='description',
place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr=None)
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,
place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr='c')
det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UsA', record_type=2,
correction_delete_indicatr='C')
det_award_5 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UK', record_type=1,
correction_delete_indicatr='')
det_award_6 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,
place_of_perform_country_c='UK', record_type=1,
correction_delete_indicatr='')
# Ignore correction delete indicator of D
det_award_7 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4,
det_award_5, det_award_6, det_award_7])
assert errors == 0
def test_failure(database):
""" Test failure PrimaryPlaceOfPerformanceForeignLocationDescription is required for foreign places of performance
(i.e., when PrimaryPlaceOfPerformanceCountryCode does not equal USA) for record type 2.
"""
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='')
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='c')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
assert errors == 2
| [
[
[
53,
92
],
[
897,
936
],
[
1192,
1231
],
[
1490,
1529
],
[
1778,
1817
],
[
2064,
2103
],
[
2348,
2387
],
[
2680,
2719
],
[
3445,
3484
],
[
3729,
3768
]
],
[
[
139,
155
],
[
2961,
2977
],
[
4012,
4028
]
],
[
[
157,
170
],
[
459,
472
]
],
[
[
172,
177
],
[
473,
478
],
[
2978,
2983
],
[
4029,
4034
]
],
[
[
233,
252
]
],
[
[
534,
546
]
],
[
[
3179,
3191
]
]
] |
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import ClassVar
import warnings
@dataclass(init=False, repr=False, frozen=True)
class Channel:
"""Base class of a hardware channel.
Not to be initialized itself, but rather through a child class and the
``Local`` or ``Global`` classmethods.
Attributes:
name: The name of channel.
basis: The addressed basis name.
addressing: "Local" or "Global".
max_abs_detuning: Maximum possible detuning (in rad/µs), in absolute
value.
max_amp: Maximum pulse amplitude (in rad/µs).
retarget_time: Maximum time to change the target (in ns).
max_targets: How many qubits can be addressed at once by the same beam.
clock_period: The duration of a clock cycle (in ns). The duration of a
pulse or delay instruction is enforced to be a multiple of the
clock cycle.
min_duration: The shortest duration an instruction can take.
max_duration: The longest duration an instruction can take.
Example:
To create a channel targeting the 'ground-rydberg' transition globally,
call ``Rydberg.Global(...)``.
"""
name: ClassVar[str]
basis: ClassVar[str]
addressing: str
max_abs_detuning: float
max_amp: float
retarget_time: int = None
max_targets: int = 1
clock_period: int = 4 # ns
min_duration: int = 16 # ns
max_duration: int = 67108864 # ns
@classmethod
def Local(cls, max_abs_detuning, max_amp, retarget_time=220, **kwargs):
"""Initializes the channel with local addressing.
Args:
max_abs_detuning (float): Maximum possible detuning (in rad/µs), in
absolute value.
max_amp(float): Maximum pulse amplitude (in rad/µs).
retarget_time (int): Maximum time to change the target (in ns).
"""
return cls('Local', max_abs_detuning, max_amp,
retarget_time=retarget_time, **kwargs)
@classmethod
def Global(cls, max_abs_detuning, max_amp, **kwargs):
"""Initializes the channel with global addressing.
Args:
max_abs_detuning (float): Maximum possible detuning (in rad/µs), in
absolute value.
max_amp(float): Maximum pulse amplitude (in rad/µs).
"""
return cls('Global', max_abs_detuning, max_amp, **kwargs)
def validate_duration(self, duration):
"""Validates and adapts the duration of an instruction on this channel.
Args:
duration (int): The duration to validate.
"""
try:
_duration = int(duration)
except (TypeError, ValueError):
raise TypeError("duration needs to be castable to an int but "
"type %s was provided" % type(duration))
if duration < self.min_duration:
raise ValueError("duration has to be at least "
+ f"{self.min_duration} ns.")
if duration > self.max_duration:
raise ValueError("duration can be at most "
+ f"{self.max_duration} ns.")
if duration % self.clock_period != 0:
_duration += self.clock_period - _duration % self.clock_period
warnings.warn(f"A duration of {duration} ns is not a multiple of "
f"the channel's clock period ({self.clock_period} "
f"ns). It was rounded up to {_duration} ns.")
return _duration
def __repr__(self):
s = ".{}(Max Absolute Detuning: {} rad/µs, Max Amplitude: {} rad/µs"
config = s.format(self.addressing, self.max_abs_detuning, self.max_amp)
if self.addressing == 'Local':
config += f", Target time: {self.retarget_time} ns"
if self.max_targets > 1:
config += f", Max targets: {self.max_targets}"
config += f", Basis: '{self.basis}'"
return self.name + config + ")"
@dataclass(init=True, repr=False, frozen=True)
class Raman(Channel):
"""Raman beam channel.
Channel targeting the transition between the hyperfine ground states, in
which the 'digital' basis is encoded. See base class.
"""
name: ClassVar[str] = 'Raman'
basis: ClassVar[str] = 'digital'
@dataclass(init=True, repr=False, frozen=True)
class Rydberg(Channel):
"""Rydberg beam channel.
Channel targeting the transition between the ground and rydberg states,
thus enconding the 'ground-rydberg' basis. See base class.
"""
name: ClassVar[str] = 'Rydberg'
basis: ClassVar[str] = 'ground-rydberg'
@dataclass(init=True, repr=False, frozen=True)
class Microwave(Channel):
"""Microwave adressing channel.
Channel targeting the transition between two rydberg states, thus encoding
the 'XY' basis. See base class.
"""
name: ClassVar[str] = 'Microwave'
basis: ClassVar[str] = 'XY'
| [
[
[
611,
620
],
[
668,
677
],
[
4625,
4634
],
[
4938,
4947
],
[
5268,
5277
]
],
[
[
640,
648
],
[
1786,
1794
],
[
1811,
1819
],
[
4874,
4882
],
[
4909,
4917
],
[
5195,
5203
],
[
5232,
5240
],
[
5510,
5518
],
[
5549,
5557
]
],
[
[
656,
664
],
[
3910,
3918
]
],
[
[
721,
728
],
[
4683,
4690
],
[
4998,
5005
],
[
5330,
5337
]
],
[
[
4677,
4682
]
],
[
[
4990,
4997
]
],
[
[
5320,
5329
]
]
] |
"""Views for the wordrelaygame app."""
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.views.generic import View, DetailView, ListView
from .forms import WordForm
from .models import Story
class HomeView(DetailView):
"""Main view that displays the current story & information about the game.
Shows the current story and information about the game. If there is a user
logged in and it is their turn, it show a form to add a word to the story.
"""
context_object_name = 'latest_story'
model = Story
template_name = 'wordrelaygame/home.html'
def get_object(self, queryset=None):
try:
latest_story = Story.objects.latest('date_created')
except Story.DoesNotExist:
return None
else:
return latest_story
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Only pass the form to the context if the current user is different
# to the user that wrote the last word of the story.
try:
latest_word_auth_id = (self.object.words.order_by('-id')[0].
author.id)
except (AttributeError, IndexError):
latest_word_auth_id = None
if(kwargs.get('current_user_id') != latest_word_auth_id or
latest_word_auth_id is None):
context['form'] = WordForm()
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object() # pylint: disable=locally-disabled, W0201
if request.user.is_authenticated:
current_user_id = request.user.id
else:
current_user_id = None
context = self.get_context_data(object=self.object,
current_user_id=current_user_id)
return self.render_to_response(context)
class StoryListView(ListView):
"""Show an archive of past stories."""
model = Story
paginate_by = 5
def get_queryset(self):
queryset = super().get_queryset()
return queryset.order_by('-date_created')
class AddWordView(LoginRequiredMixin, View):
"""Add a word to the latest story."""
http_method_names = ['post']
def post(self, request):
"""Handles the POST request to add a word to the latest story."""
try:
latest_story = Story.objects.latest('date_created')
except Story.DoesNotExist:
messages.error(request, 'You need to create a story to add a word.')
return redirect('wordrelaygame:home')
# Check the author of the previous word is different to the current
# logged in user.
try:
latest_word_auth_id = (latest_story.words.order_by('-id')[0].
author.id)
except IndexError:
latest_word_auth_id = None
if latest_word_auth_id == self.request.user.id:
messages.error(request, 'You added the last word. ' +
'Someone else needs to add a word next.')
return redirect('wordrelaygame:home')
# If the form is valid, save the new word
form = WordForm(request.POST)
if form.is_valid():
word = form.save(commit=False)
word.story = latest_story
word.author = self.request.user
word.save()
messages.success(request, 'Your word as been added. Thanks!')
return redirect('wordrelaygame:home')
return render(request, 'wordrelaygame/home.html',
{'form': form, 'latest_story': latest_story})
class AddStoryView(LoginRequiredMixin, View):
"""Create a new story.
Only allow the creation of a new story if there are no stories or if the
latest stories contains at least 64 words.
"""
http_method_names = ['post']
def post(self, request):
"""Handles the POST request to add a new story."""
add_story_allowed = False
try:
latest_story = Story.objects.latest('date_created')
except Story.DoesNotExist:
add_story_allowed = True
else:
if latest_story.words.count() > 64:
add_story_allowed = True
if add_story_allowed:
new_story = Story()
new_story.save()
messages.success(
request,
'A new story has been created. Now add the first word.'
)
else:
messages.error(
request,
('Failed to create new story. Add more '
'words to the current story instead.')
)
return redirect('wordrelaygame:home')
| [
[
[
66,
74
],
[
2554,
2562
],
[
3044,
3052
],
[
3495,
3503
],
[
4455,
4463
],
[
4610,
4618
]
],
[
[
114,
132
],
[
2224,
2242
],
[
3755,
3773
]
],
[
[
162,
170
],
[
2642,
2650
],
[
3186,
3194
],
[
3576,
3584
],
[
4794,
4802
]
],
[
[
172,
178
],
[
3623,
3629
]
],
[
[
212,
216
],
[
2244,
2248
],
[
3775,
3779
]
],
[
[
218,
228
],
[
310,
320
]
],
[
[
230,
238
],
[
1991,
1999
]
],
[
[
259,
267
],
[
1488,
1496
],
[
3283,
3291
]
],
[
[
288,
293
],
[
622,
627
],
[
2057,
2062
],
[
756,
761
],
[
808,
813
],
[
2470,
2475
],
[
2522,
2527
],
[
4139,
4144
],
[
4191,
4196
],
[
4406,
4411
]
],
[
[
301,
309
]
],
[
[
1977,
1990
]
],
[
[
2212,
2223
]
],
[
[
3742,
3754
]
]
] |
from CreateTimeGraphs import *
def create_diag(dc):
"""Time spent on TeamSpeak per user"""
globalTime = timedelta()
for u in dc.users:
# Time in seconds
u.time = timedelta()
for con in u.connections:
# Increase connected time
u.time += con.duration()
us = sorted(dc.users, key = lambda u: -u.time)
for u in us:
globalTime += u.time
us = us[:maxUsers]
# Create users graph
with openTempfile("usertime") as f:
for u in us:
# Time in days
f.write('"{0}"\t{1}\n'.format(gnuplotEscape(u.name), u.time / timedelta(days = 1)))
# Create the diagram
diag = Diagram("usertime", "Time spent on TeamSpeak", 1920, 800)
diag.xlabel = "User"
diag.ylabel = "Connection time (in days)"
diag.legend = "right"
diag.appendText = """\
set timefmt "%H:%M:%S"
set format x "%H:%M:%S"
set yrange [0:]
set xtics rotate by -90
set style histogram clustered gap 4
set boxwidth 0.8 relative
"""
diag.plots.append("using 0:2:xticlabels(1) title 'Time' with boxes")
diag.subtitle = "Sum of all time spent on this server: {0}".format(timeToString(globalTime))
diag.render(dc.diagramTemplate)
dc.generalTab.addDiagram(diag)
| [
[
[
29,
30
],
[
107,
116
],
[
170,
179
],
[
362,
370
],
[
401,
413
],
[
498,
511
],
[
530,
539
],
[
583,
590
],
[
1059,
1071
]
],
[
[
36,
47
]
]
] |
from .proxy import current_app, _app_context_ctx, switch_app, get_app
__all__ = [
current_app, _app_context_ctx, switch_app, get_app
]
| [
[
[
19,
30
],
[
87,
98
]
],
[
[
32,
48
],
[
100,
116
]
],
[
[
50,
60
],
[
118,
128
]
],
[
[
62,
69
],
[
130,
137
]
],
[
[
71,
78
]
]
] |
from functools import wraps
from os import environ
from backends.exceptions import ErrorException
def wrap_exception(exception_type, error_message):
def _typed_exception_wrapper(func):
@wraps(func)
def _adapt_exception_types(*args, **kwargs):
try:
return func(*args, **kwargs)
except exception_type as ex:
raise ErrorException(error_message) from ex
return _adapt_exception_types
return _typed_exception_wrapper
def getenv_required(key):
try:
return environ[key]
except KeyError:
raise ErrorException(
'Required environment variable %s not set' % key)
def getenv_int(key, default):
try:
value = environ[key]
except KeyError:
return default
try:
return int(value)
except ValueError:
raise ErrorException(
'Environment variable %s with value %s '
'is not convertible to int' % (key, value))
| [
[
[
22,
27
],
[
201,
206
]
],
[
[
43,
50
],
[
555,
562
],
[
738,
745
]
],
[
[
84,
98
],
[
603,
617
],
[
868,
882
],
[
391,
405
]
],
[
[
105,
119
]
],
[
[
509,
524
]
],
[
[
687,
697
]
]
] |
# coding: utf-8
# ### All imports
# In[1]:
from tf_idf import *
from evaluation import *
# In[3]:
corpus_dict = loadCorpus("corpus")
print("corpus loaded")
# In[4]:
len(corpus_dict)
corpus_dict['CACM-0637']
# In[5]:
full_corpus_dict = loadCorpus("full_corpus")
print("full_corpus loaded")
# In[6]:
full_corpus_dict['CACM-0270']
# In[7]:
# Load term frequencies
unigrams_dict = generateUnigrams(corpus_dict)
print("Unigrams loaded")
len(unigrams_dict)
# In[8]:
# Compute DF and IDF
df = generateDocumentFrequencies(corpus_dict)
print(len(df))
idf = calculateIDF(corpus_dict, df)
idf['algorithm']
# In[9]:
queries = retrieveQueries("cacm.query.txt")
#queries
# In[10]:
results={}
count = 1
for query in queries:
query_id = "q_"+str(count)
print(query_id)
results[query_id] = sortByScore(queryForResults(query, df, corpus_dict, idf))
count+=1
print("Results generated for all queries")
#results['q_1']
# In[11]:
# Write Results to File
for query_id, query_result in results.items():
print("Writing result to file "+query_id)
writeResultsToFile(query_result, "TF_IDF_"+query_id, query_id)
# In[ ]:
count = 1
n=20
for query in queries:
query_id = "q_"+str(count)
print(query_id)
html_file = generateHTML(generateSnippets(results[query_id], full_corpus_dict, query, n), results[query_id], query, n)
f = open("query_results_"+query_id+".html", "w+")
f.write(html_file)
f.close()
count+=1
#break
print("done")
# In[12]:
query_rel = {}
# In[13]:
query_rel = queryRelevance()
print(len(query_rel.keys()))
print(query_rel["q_1"])
# In[14]:
relevance_set=generateRelevanceSet(query_rel, results)
len(relevance_set)
# In[15]:
# Calculate MRR
mrr_output=calculateMRR(relevance_set)
mrr_output
mrr_output_string=""
f=open("MRR_TF_IDF.txt", "w+")
for query, mrr_score in mrr_output.items():
mrr_output_string+=query+" : "+str(mrr_score)+"\n"
f.write(mrr_output_string)
f.close()
# In[16]:
# Calculate P@K
f=open("P@K_TF_IDF.txt", "w+")
pAtKOutput=""
for query_id, rel_set in relevance_set.items():
pAtKOutput+=query_id+"\n\n"
K=5
pAtKOutput+="K = "+str(K)+"\n"
pAtKOutput+="Precision = "+str(precisionAtK(relevance_set, K, query_id))+"\n"
K=20
pAtKOutput+="K = "+str(K)+"\n"
pAtKOutput+="Precision = "+str(precisionAtK(relevance_set, K, query_id))+"\n"
pAtKOutput+="\n------------------\n\n"
f.write(pAtKOutput)
f.close()
print("Done")
# In[17]:
# Calculate Mean Average Precision (MAP)
f=open("MAP_TF_IDF.txt", "w+")
mapOutput="MAP SCORES FOR TF IDF \n\n"
for query_id in relevance_set.keys():
mapOutput+=query_id+" : "+str(calculateMAP(relevance_set, query_id))+"\n"
f.write(mapOutput)
f.close()
print("Done")
# In[18]:
# Generate all precision tables
for query_id in relevance_set.keys():
generateAllPrecisionTable(relevance_set, query_id)
# In[19]:
# Generate full Recall tables
for query_id in relevance_set.keys():
generateRecallTable(relevance_set, query_id)
| [
[
[
67,
68
]
],
[
[
92,
93
],
[
121,
131
],
[
252,
262
],
[
402,
418
],
[
516,
543
],
[
578,
590
],
[
648,
663
],
[
823,
834
],
[
835,
850
],
[
1094,
1112
],
[
1274,
1286
],
[
1287,
1303
],
[
1565,
1579
],
[
1663,
1683
],
[
1765,
1777
],
[
2231,
2243
],
[
2362,
2374
],
[
2700,
2712
],
[
2880,
2905
],
[
3018,
3037
]
],
[
[
107,
118
],
[
182,
193
],
[
195,
206
],
[
419,
430
],
[
544,
555
],
[
591,
602
],
[
862,
873
]
],
[
[
233,
249
],
[
319,
335
],
[
1323,
1339
]
],
[
[
386,
399
],
[
461,
474
]
],
[
[
511,
513
],
[
567,
569
],
[
604,
606
],
[
858,
860
]
],
[
[
572,
575
],
[
608,
611
],
[
875,
878
]
],
[
[
638,
645
],
[
739,
746
],
[
1198,
1205
]
],
[
[
705,
712
],
[
803,
810
],
[
1027,
1034
],
[
1304,
1311
],
[
1352,
1359
],
[
1695,
1702
]
],
[
[
716,
721
],
[
772,
777
],
[
885,
890
]
],
[
[
730,
735
],
[
851,
856
]
],
[
[
752,
760
],
[
789,
797
],
[
811,
819
]
],
[
[
1001,
1009
],
[
1080,
1088
],
[
1137,
1145
],
[
1147,
1155
]
],
[
[
1011,
1023
],
[
1113,
1125
]
],
[
[
1170,
1175
],
[
1231,
1236
],
[
1476,
1481
]
],
[
[
1180,
1181
],
[
1348,
1349
],
[
1378,
1379
]
],
[
[
1189,
1194
],
[
1341,
1346
],
[
1371,
1376
]
],
[
[
1211,
1219
],
[
1248,
1256
],
[
1312,
1320
],
[
1360,
1368
],
[
1411,
1419
]
],
[
[
1262,
1271
],
[
1447,
1456
]
],
[
[
1385,
1386
],
[
1439,
1440
],
[
1462,
1463
]
],
[
[
1524,
1533
]
],
[
[
1553,
1562
],
[
1592,
1601
],
[
1617,
1626
],
[
1684,
1693
]
],
[
[
1649,
1662
],
[
1708,
1721
],
[
1778,
1791
],
[
2098,
2111
],
[
2244,
2257
],
[
2375,
2388
],
[
2644,
2657
],
[
2713,
2726
],
[
2854,
2867
],
[
2906,
2919
],
[
2992,
3005
],
[
3038,
3051
]
],
[
[
1754,
1764
],
[
1797,
1807
],
[
1885,
1895
]
],
[
[
1808,
1825
],
[
1909,
1926
],
[
1968,
1985
]
],
[
[
1829,
1830
],
[
1960,
1961
],
[
1987,
1988
]
],
[
[
1865,
1870
],
[
1928,
1933
]
],
[
[
1872,
1881
],
[
1944,
1953
]
],
[
[
2028,
2029
],
[
2453,
2454
],
[
2473,
2474
]
],
[
[
2059,
2069
],
[
2125,
2135
],
[
2461,
2471
]
],
[
[
2077,
2085
],
[
2137,
2145
],
[
2262,
2270
],
[
2393,
2401
]
],
[
[
2087,
2094
]
],
[
[
2157,
2158
],
[
2188,
2189
],
[
2259,
2260
]
],
[
[
2287,
2288
],
[
2319,
2320
],
[
2390,
2391
]
],
[
[
2553,
2554
],
[
2749,
2750
],
[
2768,
2769
]
],
[
[
2584,
2593
],
[
2670,
2679
],
[
2757,
2766
]
],
[
[
2632,
2640
],
[
2681,
2689
],
[
2728,
2736
]
],
[
[
2842,
2850
],
[
2921,
2929
]
],
[
[
2980,
2988
],
[
3053,
3061
]
]
] |
import os
"""This submodule aims to provide utilities for the gaussian software package.
It will allow the user to quickly write custom interfaces to analyse the output files.
"""
class Extractor:
"""This class supports data extraction from gaussian output files.
It provides functionality to extract all the implemented data at once or custom extraction
can be set up by using its public methods.
"""
def __init__(self, filepath, labels=None):
self.filepath = filepath
self.labels = labels
self.normal_executions = 0
# Initialize
self.check_normal_execution()
self.check_frequencies()
self.label_positions = self._get_label_positions()
def check_normal_execution(self):
"""Checks for normal execution
Checks for normal execution of the gaussian output file.
Use this first when writing custom extraction methods to check the validity of the calculations.
Returns:
(bool): Returns True when a calculation has normal execution.
"""
with open(self.filepath, "r") as f:
for line in f:
if 'Normal termination of Gaussian' in line:
self.normal_executions += 1
if self.labels != None:
if self.normal_executions == len(self.labels)+1:
return True
else:
raise Exception('There are {} Normal terminations, please check this file manually: {}'.format(
self.normal_executions, self.filepath))
else:
if self.normal_executions == 0:
raise Exception(
'There are no normal terminations, please check this file manually: {}'.format(self.filepath))
elif self.normal_executions == 1:
return True
else:
raise Exception(
'There are multiple normal terminations, please set the labels when constructing the flagg.')
def check_frequencies(self):
"""Check for negative (imaginary) frequencies.
Returns:
(bool): Returns True if no negative frequencies are found.
Raises:
Exception: Raises when negative frequencies are found.
"""
with open(self.filepath, 'r') as f:
imag = False
vals = []
for line in f:
if 'Frequencies -- ' in line:
vals.append(line)
split = vals[-1].split()
if float(split[2]) < 0:
imag = True
if float(split[3]) < 0:
imag = True
if float(split[4]) < 0:
imag = True
if imag:
raise Exception(
'There are imaginary frequencies, please check this file manually: {}'.format(self.filepath))
else:
return True
def _get_label_positions(self):
results = []
with open(self.filepath, 'r') as f:
for i, line in enumerate(f):
for l in self.labels:
if l in line:
results.append([i, l])
for i, n in enumerate(results):
if n[0] == results[i-1][0]:
results.remove(results[i-1])
def clean_list():
for i, n in enumerate(results):
if n[1] == results[i-1][1]:
results.remove(results[i-1])
clean_list()
clean_list()
return results
def extract_error(self):
with open(self.filepath, 'r') as f:
temp = None
for line in f:
if 'Error termination' in line:
return temp
else:
temp = line
def _extract_geometry(self, file):
file.readline()
file.readline()
file.readline()
file.readline()
atoms = []
xyz = []
is_molecule = True
while is_molecule:
# read and process the line
line = file.readline()
split = line.split()
# check if is still the molecule
if len(split) == 1:
is_molecule = False
else:
# process the line
atoms.append(split[1])
coords = []
coords.append(split[3])
coords.append(split[4])
coords.append(split[5])
xyz.append(coords)
return atoms, xyz
def extract_optimized_geometry(self):
"""Extracts the optimized geometry
Extracts the optimized geometry from the gaussian output file.
Returns:
(tuple): tuple containing:
atoms (list) : Atom numbers
coördinates (list): Cartesian coordinates in a 2D list
"""
results = []
with open(self.filepath, 'r') as f:
for line in f:
if 'Standard orientation' in line:
atoms, xyz = self._extract_geometry(f)
results.append([atoms, xyz])
if self.labels[1] in line:
break
return results[-2]
def extract_SCF(self):
vals = []
results = []
with open(self.filepath, 'r') as f:
for i, line in enumerate(f):
if 'SCF Done' in line:
split = line.split()
vals.append([i, split[4]])
for p in self._get_label_positions():
temp = None
for v in vals:
if v[0] < p[0]:
temp = v
temp = [p[1], temp[1]]
results.append(temp)
return results
def extract_HOMO_energy(self):
with open(self.filepath, 'r') as f:
inFreq = False
vals = []
for line in f:
if 'Link1' in line:
inFreq = True
if self.labels[1] in line:
inFreq = False
if inFreq:
if 'Alpha occ. eigenvalues' in line:
vals.append(line)
split = vals[-1].split()
return split[-1]
def extract_LUMO_energy(self):
with open(self.filepath, 'r') as f:
inFreq = False
vals = []
for line in f:
if 'Link1' in line:
inFreq = True
if self.labels[1] in line:
inFreq = False
if inFreq:
if 'Alpha virt. eigenvalues' in line:
vals.append(line)
split = vals[0].split()
return split[4]
def extract_zero_point_correction(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Zero-point correction' in line:
split = line.split()
return split[2]
def extract_thermal_correction_to_energy(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Thermal correction to Energy' in line:
split = line.split()
return split[4]
def extract_thermal_correction_to_enthalpy(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Thermal correction to Enthalpy' in line:
split = line.split()
return split[4]
def extract_thermal_correction_to_gibbs_free_energy(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Thermal correction to Gibbs Free Energy' in line:
split = line.split()
return split[6]
def _extract_npa(self, file):
file.readline()
file.readline()
file.readline()
file.readline()
file.readline()
natural_charges = []
is_molecule = True
while is_molecule:
line = file.readline()
split = line.split()
if len(split) == 1:
is_molecule = False
else:
natural_charges.append(split[2])
return natural_charges
def extract_npas(self):
results = []
with open(self.filepath, 'r') as f:
vals = []
for line in f:
if 'Summary of Natural Population Analysis:' in line:
vals.append(self._extract_npa(f))
results.append(vals[0])
results.append(vals[1])
results.append(vals[4])
return results
| [
[
[
7,
9
]
],
[
[
189,
198
]
]
] |
#!/usr/bin/env python3
import unittest
import torch
import gpytorch
from gpytorch.test.variational_test_case import VariationalTestCase
class TestUnwhitenedVariationalGP(VariationalTestCase, unittest.TestCase):
@property
def batch_shape(self):
return torch.Size([])
@property
def distribution_cls(self):
return gpytorch.variational.CholeskyVariationalDistribution
@property
def mll_cls(self):
return gpytorch.mlls.VariationalELBO
@property
def strategy_cls(self):
return gpytorch.variational.UnwhitenedVariationalStrategy
def test_training_iteration(self, *args, **kwargs):
cg_mock, cholesky_mock, ciq_mock = super().test_training_iteration(*args, **kwargs)
self.assertFalse(cg_mock.called)
self.assertFalse(ciq_mock.called)
if self.distribution_cls == gpytorch.variational.CholeskyVariationalDistribution:
self.assertEqual(cholesky_mock.call_count, 3) # One for each forward pass, once for initialization
else:
self.assertEqual(cholesky_mock.call_count, 2) # One for each forward pass
def test_eval_iteration(self, *args, **kwargs):
cg_mock, cholesky_mock, ciq_mock = super().test_eval_iteration(*args, **kwargs)
self.assertFalse(cg_mock.called)
self.assertFalse(ciq_mock.called)
self.assertEqual(cholesky_mock.call_count, 1) # One to compute cache, that's it!
def test_fantasy_call(self, *args, **kwargs):
# we only want to check CholeskyVariationalDistribution
if self.distribution_cls is gpytorch.variational.CholeskyVariationalDistribution:
return super().test_fantasy_call(*args, **kwargs)
with self.assertRaises(AttributeError):
super().test_fantasy_call(*args, **kwargs)
class TestUnwhitenedPredictiveGP(TestUnwhitenedVariationalGP):
@property
def mll_cls(self):
return gpytorch.mlls.PredictiveLogLikelihood
class TestUnwhitenedRobustVGP(TestUnwhitenedVariationalGP):
@property
def mll_cls(self):
return gpytorch.mlls.GammaRobustVariationalELBO
class TestUnwhitenedMeanFieldVariationalGP(TestUnwhitenedVariationalGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestUnwhitenedMeanFieldPredictiveGP(TestUnwhitenedPredictiveGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestUnwhitenedMeanFieldRobustVGP(TestUnwhitenedRobustVGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestUnwhitenedDeltaVariationalGP(TestUnwhitenedVariationalGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
class TestUnwhitenedDeltaPredictiveGP(TestUnwhitenedPredictiveGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
class TestUnwhitenedDeltaRobustVGP(TestUnwhitenedRobustVGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
if __name__ == "__main__":
unittest.main()
| [
[
[
31,
39
],
[
196,
204
],
[
3253,
3261
]
],
[
[
48,
53
],
[
272,
277
]
],
[
[
62,
70
],
[
349,
357
],
[
455,
463
],
[
543,
551
],
[
862,
870
],
[
1594,
1602
],
[
1931,
1939
],
[
2083,
2091
],
[
2260,
2268
],
[
2448,
2456
],
[
2630,
2638
],
[
2816,
2824
],
[
2996,
3004
],
[
3170,
3178
]
],
[
[
119,
138
],
[
175,
194
]
],
[
[
147,
174
],
[
1849,
1876
],
[
2001,
2028
],
[
2169,
2196
],
[
2725,
2752
]
],
[
[
1822,
1848
],
[
2358,
2384
],
[
2906,
2932
]
],
[
[
1977,
2000
],
[
2543,
2566
],
[
3083,
3106
]
],
[
[
2132,
2168
]
],
[
[
2322,
2357
]
],
[
[
2510,
2542
]
],
[
[
2692,
2724
]
],
[
[
2874,
2905
]
],
[
[
3054,
3082
]
]
] |
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
Observers for XMLSchema classes.
"""
from functools import wraps
from ..names import XSD_NAMESPACE, XSD_ANY_TYPE
from ..validators import XMLSchema10, XMLSchema11, XsdGroup, XsdAttributeGroup, XsdComplexType
class SchemaObserver:
"""
Observer that registers created components. Run the 'clear' method after each usage.
"""
components = []
dummy_components = []
@classmethod
def observed_builder(cls, builder):
if isinstance(builder, type):
class BuilderProxy(builder):
def __init__(self, *args, **kwargs):
super(BuilderProxy, self).__init__(*args, **kwargs)
if not cls.is_dummy_component(self):
cls.components.append(self)
else:
cls.dummy_components.append(self)
BuilderProxy.__name__ = builder.__name__
return BuilderProxy
elif callable(builder):
@wraps(builder)
def builder_proxy(*args, **kwargs):
obj = builder(*args, **kwargs)
if not cls.is_dummy_component(obj):
cls.components.append(obj)
else:
cls.dummy_components.append(obj)
return obj
return builder_proxy
@classmethod
def clear(cls):
del cls.components[:]
del cls.dummy_components[:]
@classmethod
def is_dummy_component(cls, component):
# Dummy components are empty attribute groups and xs:anyType
# definitions not related to XSD namespace.
if component.parent in cls.dummy_components:
return True
elif isinstance(component, XsdAttributeGroup):
return not component
elif isinstance(component, XsdComplexType):
return component.name == XSD_ANY_TYPE and \
component.target_namespace != XSD_NAMESPACE
elif isinstance(component, XsdGroup) and component.parent is not None:
return component.parent.name == XSD_ANY_TYPE and \
component.target_namespace != XSD_NAMESPACE
return False
class ObservedXMLSchema10(XMLSchema10):
BUILDERS = {
k: SchemaObserver.observed_builder(getattr(XMLSchema10.BUILDERS, k))
for k in getattr(XMLSchema10.BUILDERS, '_fields')
}
class ObservedXMLSchema11(XMLSchema11):
BUILDERS = {
k: SchemaObserver.observed_builder(getattr(XMLSchema11.BUILDERS, k))
for k in getattr(XMLSchema11.BUILDERS, '_fields')
}
| [
[
[
395,
400
],
[
1306,
1311
]
],
[
[
422,
435
],
[
2256,
2269
],
[
2458,
2471
]
],
[
[
437,
449
],
[
2191,
2203
],
[
2393,
2405
]
],
[
[
475,
486
],
[
2521,
2532
],
[
2654,
2665
],
[
2603,
2614
]
],
[
[
488,
499
],
[
2721,
2732
],
[
2854,
2865
],
[
2803,
2814
]
],
[
[
501,
509
],
[
2305,
2313
]
],
[
[
511,
528
],
[
2049,
2066
]
],
[
[
530,
544
],
[
2137,
2151
]
],
[
[
553,
567
],
[
2563,
2577
],
[
2763,
2777
]
],
[
[
2501,
2520
]
],
[
[
2701,
2720
]
]
] |
import pytest
from naturalnets.brains.i_layer_based_brain import ILayerBasedBrainCfg
from tests.pytorch_brains import IPytorchBrainCfg
@pytest.fixture
def torch_config() -> IPytorchBrainCfg:
return IPytorchBrainCfg(type="GRU_PyTorch", num_layers=3,
hidden_size=8,
use_bias=False)
@pytest.fixture
def numpy_config() -> ILayerBasedBrainCfg:
return ILayerBasedBrainCfg(type="GRULayered", hidden_layer_structure=[8, 8, 8], diagonal_hidden_to_hidden=False,
use_bias=False)
| [
[
[
7,
13
],
[
137,
143
],
[
344,
350
]
],
[
[
65,
84
],
[
381,
400
],
[
413,
432
]
],
[
[
118,
134
],
[
174,
190
],
[
203,
219
]
],
[
[
156,
168
]
],
[
[
363,
375
]
]
] |
# coding=utf-8
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to end tests for FixedReplayRunner."""
import datetime
import os
import shutil
from absl import flags
from batch_rl.fixed_replay import train
import tensorflow as tf
FLAGS = flags.FLAGS
class FixedReplayRunnerIntegrationTest(tf.test.TestCase):
"""Tests for Atari environment with various agents.
"""
def setUp(self):
super(FixedReplayRunnerIntegrationTest, self).setUp()
FLAGS.base_dir = os.path.join(
'/tmp/batch_rl_tests',
datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S'))
self._checkpoint_dir = os.path.join(FLAGS.base_dir, 'checkpoints')
self._logging_dir = os.path.join(FLAGS.base_dir, 'logs')
def quickFixedReplayREMFlags(self):
"""Assign flags for a quick run of FixedReplay agent."""
FLAGS.gin_bindings = [
"create_runner.schedule='continuous_train_and_eval'",
'FixedReplayRunner.training_steps=100',
'FixedReplayRunner.evaluation_steps=10',
'FixedReplayRunner.num_iterations=1',
'FixedReplayRunner.max_steps_per_episode=100',
]
FLAGS.alsologtostderr = True
FLAGS.gin_files = ['batch_rl/fixed_replay/configs/rem.gin']
FLAGS.agent_name = 'multi_head_dqn'
def verifyFilesCreated(self, base_dir):
"""Verify that files have been created."""
# Check checkpoint files
self.assertTrue(
os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0')))
self.assertTrue(
os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint')))
self.assertTrue(
os.path.exists(
os.path.join(self._checkpoint_dir,
'sentinel_checkpoint_complete.0')))
# Check log files
self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0')))
def testIntegrationFixedReplayREM(self):
"""Test the FixedReplayMultiHeadDQN agent."""
assert FLAGS.replay_dir is not None, 'Please provide a replay directory'
tf.logging.info('####### Training the REM agent #####')
tf.logging.info('####### REM base_dir: {}'.format(FLAGS.base_dir))
tf.logging.info('####### replay_dir: {}'.format(FLAGS.replay_dir))
self.quickFixedReplayREMFlags()
train.main([])
self.verifyFilesCreated(FLAGS.base_dir)
shutil.rmtree(FLAGS.base_dir)
if __name__ == '__main__':
tf.test.main()
| [
[
[
618,
626
],
[
1036,
1044
]
],
[
[
634,
636
],
[
983,
985
],
[
1125,
1127
],
[
1193,
1195
],
[
1908,
1910
],
[
1923,
1925
],
[
1999,
2001
],
[
2014,
2016
],
[
2094,
2096
],
[
2122,
2124
],
[
2260,
2262
],
[
2275,
2277
]
],
[
[
644,
650
],
[
2795,
2801
]
],
[
[
671,
676
],
[
751,
756
]
],
[
[
712,
717
],
[
2732,
2737
]
],
[
[
725,
741
],
[
804,
806
],
[
2855,
2857
],
[
2493,
2495
],
[
2553,
2555
],
[
2624,
2626
]
],
[
[
743,
748
],
[
966,
971
],
[
1138,
1143
],
[
1206,
1211
],
[
1334,
1339
],
[
1627,
1632
],
[
1660,
1665
],
[
1724,
1729
],
[
2423,
2428
],
[
2603,
2608
],
[
2673,
2678
],
[
2775,
2780
],
[
2809,
2814
]
],
[
[
771,
803
],
[
914,
946
]
]
] |
from django.forms import forms, ModelForm
class FileForm(forms.Form):
file_name = forms.FileField(label=u"文件名称")
| [
[
[
25,
30
],
[
62,
67
],
[
92,
97
]
],
[
[
32,
41
]
],
[
[
53,
61
]
]
] |
from __future__ import print_function
import sys
CLI = False
DEBUG = False
AUTO = False
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def setdebug(state):
global DEBUG
DEBUG = state
Debug("Python:")
for p in sys.version.split("\n"):
Debug(p)
def setauto(state):
global AUTO
AUTO = state
def isauto():
global AUTO
return AUTO
def setcli():
global CLI
CLI = True
def Warn(*message):
"""
:param Any message:
"""
print("[WARN] " + " ".join([str(m) for m in message]))
def Inform(*message):
"""
:param Any message:
"""
print("[AUTOMATIA] " + " ".join([str(m) for m in message]))
def Debug(*message):
global DEBUG
if DEBUG:
Inform("[D] " + " ".join([str(m) for m in message]))
def Error(*message):
"""
:param Any message:
"""
eprint("[ERROR] " + " ".join([str(m) for m in message]))
class FinishFinal(Exception):
pass
class FinishNow(FinishFinal):
pass
class FinishResult(Exception):
def __init__(self, URL, m=None):
self.URL = URL
self.m = m
| [
[
[
23,
37
]
],
[
[
46,
49
],
[
143,
146
],
[
258,
261
]
],
[
[
51,
54
]
],
[
[
63,
68
],
[
744,
749
]
],
[
[
77,
81
],
[
397,
401
]
],
[
[
96,
102
],
[
879,
885
]
],
[
[
171,
179
]
],
[
[
306,
313
]
],
[
[
360,
366
]
],
[
[
408,
414
]
],
[
[
454,
458
]
],
[
[
575,
581
],
[
759,
765
]
],
[
[
703,
708
],
[
228,
233
],
[
291,
296
]
],
[
[
818,
823
]
],
[
[
944,
955
],
[
995,
1006
]
],
[
[
985,
994
]
],
[
[
1026,
1038
]
],
[
[
209,
214
]
],
[
[
342,
346
]
],
[
[
437,
440
]
]
] |
from django.apps import AppConfig
class TimelineappConfig(AppConfig):
name = 'timelineApp'
| [
[
[
24,
33
],
[
60,
69
]
],
[
[
42,
59
]
]
] |
from typing import Any
class DataContainer:
def __init__(self, train: Any, validation: Any, test: Any):
self.train = train
self.validation = validation
self.test = test
| [
[
[
19,
22
],
[
76,
79
],
[
93,
96
],
[
104,
107
]
],
[
[
31,
44
]
]
] |
class EC():
def __init__(self, id=None):
self.database = 'EC'
self.id = id
self._long_name = 'EC (Enzyme Commission) number of the Nomenclature Committee of the International Union of Biochemistry and Molecular Biology (IUBMB) Database of Interacting Proteins'
self._web = 'https://iubmb.qmul.ac.uk/enzyme/'
def __call__(self):
tmp_dict = {
'database' : 'EC',
'id' : self.id
}
return tmp_dict
def __repr__(self):
return f'<EC: {self.id}>'
def __str__(self):
return f'EC: {self.id}'
def __deepcopy__(self):
return EC(id=self.id)
def _webid(self):
return self._web
def _repr_html_(self):
return f'<a href="https://enzyme.expasy.org/EC/{self.id}">{self.database}: {self.id}</a>'
| [
[
[
6,
8
],
[
664,
666
]
]
] |
"""dailypythontip home app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from home import views
app_name = 'home'
apis = [
path('tips/', views.api_tip_list, name='api-tip-list'),
path('tips/<int:pk>/', views.api_tip_detail, name='api-tip-detail'),
]
urlpatterns = [
path('', views.index, name='index'),
path('retweet/<int:tweet_id>/', views.retweet, name='retweet'),
path('search/', views.search_tips, name='search-tips'),
path('filter/<str:tag>/', views.filter_tag, name='filter-tag'),
path('sort/<str:criteria>/', views.sort_tips, name='sort-tips'),
path('today/', views.todays_tip, name='retrieve-today'),
path('accounts/register/', views.create_account, name='register'),
path('link_twitter/', views.link_twitter, name='link-twitter'),
path('accounts/login/', views.log_in, name='login'),
path('logout/', views.log_out, name='logout'),
path('api/', include(apis)),
]
| [
[
[
671,
675
],
[
742,
746
],
[
802,
806
],
[
894,
898
],
[
935,
939
],
[
1003,
1007
],
[
1063,
1067
],
[
1131,
1135
],
[
1200,
1204
],
[
1261,
1265
],
[
1332,
1336
],
[
1400,
1404
],
[
1457,
1461
],
[
1508,
1512
]
],
[
[
677,
684
],
[
1521,
1528
]
],
[
[
703,
708
],
[
756,
761
],
[
825,
830
],
[
903,
908
],
[
967,
972
],
[
1019,
1024
],
[
1089,
1094
],
[
1160,
1165
],
[
1215,
1220
],
[
1288,
1293
],
[
1354,
1359
],
[
1424,
1429
],
[
1473,
1478
]
],
[
[
710,
718
]
],
[
[
729,
733
],
[
1529,
1533
]
],
[
[
874,
885
]
]
] |
import json
import logging
import os
import sys
import lambda_utils as utils
"""
Configure these environment variables in your Lambda environment or
CloudFormation Inputs settings):
1. TARGET_FQDN (mandatory): The Fully Qualified DNS Name used for application
cluster
2. ELB_TG_ARN (mandatory): The ARN of the Elastic Load Balancer's target group
3. DNS_SERVER (optional): The DNS Servers to query TARGET_FQDN if you do not want to use AWS default (i.e., if you want to run this function attached to a VPC and use its resolver)
"""
if 'TARGET_FQDN' in os.environ:
TARGET_FQDN = os.environ['TARGET_FQDN']
else:
print("ERROR: Missing Target Hostname.")
sys.exit(1)
if 'ELB_TG_ARN' in os.environ:
ELB_TG_ARN = os.environ['ELB_TG_ARN']
else:
print("ERROR: Missing Destination Target Group ARN.")
sys.exit(1)
if 'DNS_SERVER' in os.environ:
DNS_SERVER = os.environ['DNS_SERVER']
else:
print("Info: DNS resolver not specified, using default.")
DNS_SERVER = None
# MAIN Function - This function will be invoked when Lambda is called
def lambda_handler(event, context):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.info("INFO: Received event: {}".format(json.dumps(event)))
# Get Currently Resgistered IPs list
logger.info("INFO: Checking existing target group members")
registered_ip_list = utils.describe_target_health(ELB_TG_ARN)
# Query DNS for hostname IPs
logger.info("INFO: Performing DNS lookup")
try:
hostname_ip_list = []
dns_lookup_result = utils.dns_lookup(DNS_SERVER, TARGET_FQDN, "A")
hostname_ip_list = dns_lookup_result + hostname_ip_list
logger.info(f"INFO: Hostname IPs resolved by DNS lookup: {format(hostname_ip_list)}")
# IP list to register with target group, minus existing IPs
new_ips_to_register_list = list(set(hostname_ip_list) - set(registered_ip_list))
# Register new targets
if new_ips_to_register_list:
logger.info(f"INFO: Registering {format(new_ips_to_register_list)}")
utils.register_target(ELB_TG_ARN, new_ips_to_register_list)
else:
logger.info("INFO: No IPs to register.")
# IP list to remove from the target group, minus the currently resolved ones
old_ips_to_remove_list = list(set(registered_ip_list) - set(hostname_ip_list))
# Remove old IPs from the target group
if old_ips_to_remove_list:
logger.info(f"INFO: Removing old IPs: {format(old_ips_to_remove_list)}")
utils.deregister_target(ELB_TG_ARN, old_ips_to_remove_list)
else:
logger.info("INFO: Target group members up to date, nothing to remove")
logger.info("INFO: Update completed successfuly.")
# Exception handler
except Exception as e:
logger.error("ERROR:", e)
logger.error("ERROR: Invocation failed.")
return(1)
return (0)
| [
[
[
7,
11
],
[
1222,
1226
]
],
[
[
19,
26
],
[
1118,
1125
],
[
1158,
1165
]
],
[
[
34,
36
],
[
556,
558
],
[
586,
588
],
[
699,
701
],
[
728,
730
],
[
853,
855
],
[
882,
884
]
],
[
[
44,
47
],
[
667,
670
],
[
821,
824
]
],
[
[
56,
77
],
[
1377,
1382
],
[
1566,
1571
],
[
2091,
2096
],
[
2580,
2585
]
],
[
[
572,
583
],
[
1595,
1606
]
],
[
[
715,
725
],
[
1406,
1416
],
[
2113,
2123
],
[
2604,
2614
]
],
[
[
869,
879
],
[
1583,
1593
]
],
[
[
979,
989
],
[
1583,
1593
]
],
[
[
1073,
1087
]
]
] |
from typing import Dict, List
from pydantic import BaseModel, Extra
class NewTeam(BaseModel):
name: str
members: Dict[str, List[str]] # uid, role
class Config:
extra = Extra.forbid
class Team(NewTeam):
uid: str
| [
[
[
19,
23
],
[
124,
128
]
],
[
[
25,
29
],
[
134,
138
]
],
[
[
52,
61
],
[
85,
94
]
],
[
[
63,
68
],
[
193,
198
]
],
[
[
77,
84
],
[
219,
226
]
],
[
[
214,
218
]
]
] |
from django.test import TestCase
from webinterface.models import *
class AssignmentTest(TestCase):
@classmethod
def setUpTestData(cls):
# Config
cls.reference_week = 2500
# Schedule
cls.schedule = Schedule.objects.create(name="schedule", cleaners_per_date=2, frequency=2, weekday=3)
# Cleaners
cls.cleaner1 = Cleaner.objects.create(name="cleaner1")
cls.cleaner2 = Cleaner.objects.create(name="cleaner2")
cls.cleaner3 = Cleaner.objects.create(name="cleaner3")
# CleaningDays
cls.cleaning_week1 = CleaningWeek.objects.create(week=cls.reference_week, schedule=cls.schedule)
cls.cleaning_week2 = CleaningWeek.objects.create(week=cls.reference_week + 1, schedule=cls.schedule)
# Assignments
cls.assignment1 = Assignment.objects.create(
cleaner=cls.cleaner1, schedule=cls.schedule, cleaning_week=cls.cleaning_week1)
cls.assignment2 = Assignment.objects.create(
cleaner=cls.cleaner2, schedule=cls.schedule, cleaning_week=cls.cleaning_week1)
cls.assignment3 = Assignment.objects.create(
cleaner=cls.cleaner3, schedule=cls.schedule, cleaning_week=cls.cleaning_week2)
# DutySwitch
cls.dutyswitch = DutySwitch.objects.create(requester_assignment=cls.assignment1)
def test__str(self):
self.assertIn(self.schedule.name, self.assignment1.__str__())
self.assertIn(self.cleaner1.name, self.assignment1.__str__())
self.assertIn(self.assignment1.assignment_date().strftime('%d. %b %Y'), self.assignment1.__str__())
def test__assignment_date(self):
self.assertEqual(self.assignment1.assignment_date(),
epoch_week_to_monday(self.reference_week) + datetime.timedelta(days=self.schedule.weekday))
def test__all_cleaners_in_week_for_schedule(self):
all_cleaners = self.assignment1.all_cleaners_in_week_for_schedule()
self.assertIn(self.cleaner1, all_cleaners)
self.assertIn(self.cleaner2, all_cleaners)
self.assertNotIn(self.cleaner3, all_cleaners)
def test__other_cleaners_in_week_for_schedule(self):
other_cleaners = self.assignment1.other_cleaners_in_week_for_schedule()
self.assertNotIn(self.cleaner1, other_cleaners)
self.assertIn(self.cleaner2, other_cleaners)
self.assertNotIn(self.cleaner3, other_cleaners)
def test__switch_requested(self):
self.assertEqual(self.assignment1.switch_requested(), self.dutyswitch)
self.assertEqual(self.assignment2.switch_requested(), None)
| [
[
[
24,
32
],
[
90,
98
]
],
[
[
65,
66
],
[
240,
248
],
[
369,
376
],
[
432,
439
],
[
495,
502
],
[
588,
600
],
[
693,
705
],
[
822,
832
],
[
966,
976
],
[
1110,
1120
],
[
1275,
1285
],
[
1737,
1757
],
[
1781,
1789
]
],
[
[
75,
89
]
]
] |
# Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
import contextlib
import io
import sys
@contextlib.contextmanager
def capture_stdout_stderr(reset_seek: bool = True) -> None:
new_stderr = io.StringIO()
new_stdout = io.StringIO()
with contextlib.redirect_stderr(
new_stderr
) as err, contextlib.redirect_stdout(new_stdout) as out:
yield out, err
if reset_seek:
# Reset the file pointer to the beginning of the stream.
out.seek(0)
err.seek(0)
| [
[
[
233,
243
],
[
268,
278
],
[
425,
435
],
[
486,
496
]
],
[
[
251,
253
],
[
371,
373
],
[
402,
404
]
],
[
[
261,
264
]
],
[
[
298,
319
]
]
] |
"""
Modification of https://github.com/stanfordnlp/treelstm/blob/master/scripts/download.py
Downloads the following:
- Celeb-A dataset
- LSUN dataset
- MNIST dataset
"""
from __future__ import print_function
import os
import sys
import gzip
import json
import shutil
import zipfile
import argparse
import requests
import subprocess
from tqdm import tqdm
from six.moves import urllib
parser = argparse.ArgumentParser(description='Download dataset for DCGAN.')
parser.add_argument('datasets', metavar='N', type=str, nargs='+', choices=['celebA', 'lsun', 'mnist', 'monet'],
help='name of dataset to download [celebA, lsun, mnist, monet]')
def download(url, dirpath):
filename = url.split('/')[-1]
filepath = os.path.join(dirpath, filename)
u = urllib.request.urlopen(url)
f = open(filepath, 'wb')
filesize = int(u.headers["Content-Length"])
print("Downloading: %s Bytes: %s" % (filename, filesize))
downloaded = 0
block_sz = 8192
status_width = 70
while True:
buf = u.read(block_sz)
if not buf:
print('')
break
else:
print('', end='\r')
downloaded += len(buf)
f.write(buf)
status = (("[%-" + str(status_width + 1) + "s] %3.2f%%") %
('=' * int(float(downloaded) / filesize * status_width) + '>', downloaded * 100. / filesize))
print(status, end='')
sys.stdout.flush()
f.close()
return filepath
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={ 'id': id }, stream=True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination, chunk_size=32*1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size,
unit='B', unit_scale=True, desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def unzip(filepath):
print("Extracting: " + filepath)
dirpath = os.path.dirname(filepath)
with zipfile.ZipFile(filepath) as zf:
zf.extractall(dirpath)
os.remove(filepath)
def download_celeb_a(dirpath):
data_dir = 'celebA'
if os.path.exists(os.path.join(dirpath, data_dir)):
print('Found Celeb-A - skip')
return
filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
save_path = os.path.join(dirpath, filename)
if os.path.exists(save_path):
print('[*] {} already exists'.format(save_path))
else:
download_file_from_google_drive(drive_id, save_path)
zip_dir = ''
with zipfile.ZipFile(save_path) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(dirpath)
os.remove(save_path)
os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))
def _list_categories(tag):
url = 'http://lsun.cs.princeton.edu/htbin/list.cgi?tag=' + tag
f = urllib.request.urlopen(url)
return json.loads(f.read())
def _download_lsun(out_dir, category, set_name, tag):
url = 'http://lsun.cs.princeton.edu/htbin/download.cgi?tag={tag}' \
'&category={category}&set={set_name}'.format(**locals())
print(url)
if set_name == 'test':
out_name = 'test_lmdb.zip'
else:
out_name = '{category}_{set_name}_lmdb.zip'.format(**locals())
out_path = os.path.join(out_dir, out_name)
cmd = ['curl', url, '-o', out_path]
print('Downloading', category, set_name, 'set')
subprocess.call(cmd)
def download_lsun(dirpath):
data_dir = os.path.join(dirpath, 'lsun')
if os.path.exists(data_dir):
print('Found LSUN - skip')
return
else:
os.mkdir(data_dir)
tag = 'latest'
#categories = _list_categories(tag)
categories = ['bedroom']
for category in categories:
_download_lsun(data_dir, category, 'train', tag)
_download_lsun(data_dir, category, 'val', tag)
_download_lsun(data_dir, '', 'test', tag)
def download_mnist(dirpath):
data_dir = os.path.join(dirpath, 'mnist')
if os.path.exists(data_dir):
print('Found MNIST - skip')
return
else:
os.mkdir(data_dir)
url_base = 'http://yann.lecun.com/exdb/mnist/'
file_names = ['train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz']
for file_name in file_names:
url = (url_base+file_name).format(**locals())
print(url)
out_path = os.path.join(data_dir,file_name)
cmd = ['curl', url, '-o', out_path]
print('Downloading ', file_name)
subprocess.call(cmd)
cmd = ['gzip', '-d', out_path]
print('Decompressing ', file_name)
subprocess.call(cmd)
def download_monet(dirpath):
data_dir = 'monet'
link = "https://drive.google.com/file/d/1d0MCjHbfU5MUv2NN1r-bbMJf4rf-Frpq/view?usp=sharing"
if os.path.exists(os.path.join(dirpath, 'monet')):
print('Found MONET - skip')
return
filename, drive_id = "monet.zip", "1d0MCjHbfU5MUv2NN1r-bbMJf4rf-Frpq"
save_path = os.path.join(dirpath, filename)
if os.path.exists(save_path):
print('[*] {} already exists'.format(save_path))
else:
download_file_from_google_drive(drive_id, save_path)
zip_dir = ''
with zipfile.ZipFile(save_path) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(dirpath)
os.remove(save_path)
os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))
def prepare_data_dir(path = './data'):
if not os.path.exists(path):
os.mkdir(path)
if __name__ == '__main__':
args = parser.parse_args()
prepare_data_dir()
if any(name in args.datasets for name in ['CelebA', 'celebA', 'celebA']):
download_celeb_a('./data')
if 'lsun' in args.datasets:
download_lsun('./data')
if 'mnist' in args.datasets:
download_mnist('./data')
if 'monet' in args.datasets:
download_monet('./data')
| [
[
[
195,
209
]
],
[
[
217,
219
],
[
724,
726
],
[
2399,
2401
],
[
2494,
2496
],
[
2573,
2575
],
[
2588,
2590
],
[
2761,
2763
],
[
2799,
2801
],
[
3061,
3063
],
[
3084,
3086
],
[
3094,
3096
],
[
3126,
3128
],
[
3661,
3663
],
[
3846,
3848
],
[
3881,
3883
],
[
3961,
3963
],
[
4285,
4287
],
[
4321,
4323
],
[
4402,
4404
],
[
4763,
4765
],
[
5153,
5155
],
[
5168,
5170
],
[
5344,
5346
],
[
5384,
5386
],
[
5662,
5664
],
[
5687,
5689
],
[
5697,
5699
],
[
5729,
5731
],
[
5812,
5814
],
[
5838,
5840
]
],
[
[
227,
230
],
[
1337,
1340
]
],
[
[
238,
242
]
],
[
[
250,
254
],
[
3295,
3299
]
],
[
[
262,
268
]
],
[
[
276,
283
],
[
2432,
2439
],
[
2967,
2974
],
[
5562,
5569
]
],
[
[
291,
299
],
[
395,
403
]
],
[
[
307,
315
],
[
1506,
1514
]
],
[
[
323,
333
],
[
3783,
3793
],
[
4877,
4887
],
[
4976,
4986
]
],
[
[
351,
355
],
[
2138,
2142
]
],
[
[
378,
384
],
[
762,
768
],
[
3258,
3264
]
],
[
[
386,
392
],
[
462,
468
],
[
5890,
5896
]
],
[
[
655,
663
]
],
[
[
1391,
1422
],
[
2891,
2922
],
[
5482,
5513
]
],
[
[
1800,
1817
],
[
1600,
1617
]
],
[
[
1956,
1977
],
[
1750,
1771
]
],
[
[
2335,
2340
]
],
[
[
2519,
2535
],
[
6012,
6028
]
],
[
[
3164,
3180
]
],
[
[
3321,
3335
],
[
4098,
4112
],
[
4151,
4165
],
[
4200,
4214
]
],
[
[
3809,
3822
],
[
6073,
6086
]
],
[
[
4247,
4261
],
[
6132,
6146
]
],
[
[
5002,
5016
],
[
6192,
6206
]
],
[
[
5768,
5784
],
[
5912,
5928
]
],
[
[
5883,
5887
],
[
5949,
5953
],
[
6054,
6058
],
[
6113,
6117
],
[
6173,
6177
]
]
] |
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
import abc
import collections
import struct
class OptionType(metaclass=abc.ABCMeta):
"""Interface for decoding and encoding option values
Instances of :class:`OptionType` are collected in a list in a
:attr:`.Message.opt` :class:`.Options` object, and provide a translation
between the CoAP octet-stream (accessed using the
:meth:`encode()`/:meth:`decode()` method pair) and the interpreted value
(accessed via the :attr:`value` attribute).
Note that OptionType objects usually don't need to be handled by library
users; the recommended way to read and set options is via the Options
object'sproperties (eg. ``message.opt.uri_path = ('.well-known',
'core')``)."""
@abc.abstractmethod
def __init__(self, number, value):
"""Set the `self.name` and `self.value` attributes"""
@abc.abstractmethod
def encode(self):
"""Return the option's value in serialzied form"""
@abc.abstractmethod
def decode(self, rawdata):
"""Set the option's value from the bytes in rawdata"""
@property
def length(self):
"""Indicate the length of the encoded value"""
return len(self.encode())
class StringOption(OptionType):
"""String CoAP option - used to represent string options. Always encoded in
UTF8 per CoAP specification."""
def __init__(self, number, value=""):
self.value = value
self.number = number
def encode(self):
# FIXME: actually, this should be utf8 of the net-unicode form (maybe it is)
rawdata = self.value.encode('utf-8')
return rawdata
def decode(self, rawdata):
self.value = rawdata.decode('utf-8')
def _length(self):
return len(self.value.encode('utf-8'))
length = property(_length)
def __str__(self):
return self.value
class OpaqueOption(OptionType):
"""Opaque CoAP option - used to represent options that just have their
uninterpreted bytes as value."""
def __init__(self, number, value=b""):
self.value = value
self.number = number
def encode(self):
rawdata = self.value
return rawdata
def decode(self, rawdata):
self.value = rawdata # if rawdata is not None else ""
def _length(self):
return len(self.value)
length = property(_length)
def __str__(self):
return repr(self.value)
class UintOption(OptionType):
"""Uint CoAP option - used to represent integer options."""
def __init__(self, number, value=0):
self.value = value
self.number = number
def encode(self):
rawdata = struct.pack("!L", self.value) # For Python >3.1 replace with int.to_bytes()
return rawdata.lstrip(bytes([0]))
def decode(self, rawdata): # For Python >3.1 replace with int.from_bytes()
value = 0
for byte in rawdata:
value = (value * 256) + byte
self.value = value
return self
def _length(self):
if self.value > 0:
return (self.value.bit_length() - 1) // 8 + 1
else:
return 0
length = property(_length)
def __str__(self):
return str(self.value)
class BlockOption(OptionType):
"""Block CoAP option - special option used only for Block1 and Block2 options.
Currently it is the only type of CoAP options that has
internal structure."""
class BlockwiseTuple(collections.namedtuple('_BlockwiseTuple', ['block_number', 'more', 'size_exponent'])):
@property
def size(self):
return 2 ** (self.size_exponent + 4)
@property
def start(self):
return self.block_number * self.size
def __init__(self, number, value=None):
if value is not None:
self._value = self.BlockwiseTuple._make(value)
self.number = number
value = property(lambda self: self._value,
lambda self, value: setattr(self, '_value', self.BlockwiseTuple._make(value)))
def encode(self):
as_integer = (self.value.block_number << 4) + (self.value.more * 0x08) + self.value.size_exponent
rawdata = struct.pack("!L", as_integer) # For Python >3.1 replace with int.to_bytes()
return rawdata.lstrip(bytes([0]))
def decode(self, rawdata):
as_integer = 0
for byte in rawdata:
as_integer = (as_integer * 256) + byte
self.value = self.BlockwiseTuple(block_number=(as_integer >> 4), more=bool(as_integer & 0x08),
size_exponent=(as_integer & 0x07))
def _length(self):
return ((self.value[0].bit_length() + 3) // 8 + 1)
length = property(_length)
def __str__(self):
return str(self.value)
| [
[
[
346,
349
],
[
412,
415
],
[
1052,
1055
],
[
1178,
1181
],
[
1284,
1287
]
],
[
[
357,
368
],
[
3773,
3784
]
],
[
[
376,
382
],
[
2973,
2979
],
[
4502,
4508
]
],
[
[
391,
401
],
[
1545,
1555
],
[
2200,
2210
],
[
2757,
2767
],
[
3559,
3569
]
],
[
[
1532,
1544
]
],
[
[
2187,
2199
]
],
[
[
2746,
2756
]
],
[
[
3547,
3558
]
]
] |
# https://towardsdatascience.com/elucidating-policy-iteration-in-reinforcement-learning-jacks-car-rental-problem-d41b34c8aec7
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import poisson
import sys
class Poisson:
def __init__(self, exp_num):
self.exp_num = exp_num
eps = 0.01
# [alpha , beta] is the range of n's for which the pmf value is above eps
self.alpha = 0
state = 1
self.vals = {}
summer = 0
while(1):
if state == 1:
temp = poisson.pmf(self.alpha, self.exp_num)
if(temp <= eps):
self.alpha+=1
else:
self.vals[self.alpha] = temp
summer += temp
self.beta = self.alpha+1
state = 2
elif state == 2:
temp = poisson.pmf(self.beta, self.exp_num)
if(temp > eps):
self.vals[self.beta] = temp
summer += temp
self.beta+=1
else:
break
# normalizing the pmf, values of n outside of [alpha, beta] have pmf = 0
added_val = (1-summer)/(self.beta-self.alpha)
for key in self.vals:
self.vals[key] += added_val
def f(self, n):
try:
Ret_value = self.vals[n]
except(KeyError):
Ret_value = 0
finally:
return Ret_value
# A class holding the properties of a location together
class location:
def __init__(self, req, ret):
self.alpha = req # value of lambda for requests
self.beta = ret # value of lambda for returns
self.poisson_alp = Poisson(self.alpha)
self.poisson_beta = Poisson(self.beta)
class jcp:
def __init__(self, max_cars, disc_rate, credit_reward, moving_reward):
self.max_cars = max_cars
self.disc_rate = disc_rate
self.credit_reward = credit_reward
self.moving_reward = moving_reward
self.policy_evaluation_eps = 50
self.save_policy_counter = 0
self.save_value_counter = 0
# Location initialisation
self.A = location(3, 3)
self.B = location(4, 2)
# Initializing the value and policy matrices. Initial policy has zero value for all states.
self.value = np.zeros((self.max_cars+1, self.max_cars+1))
self.policy = np.zeros((self.max_cars+1, self.max_cars+1)).astype(int)
def expected_reward(self, state, action):
"""
state : It's a pair of integers, # of cars at A and at B
action : # of cars transferred from A to B, -5 <= action <= 5
"""
reward = 0
new_state = [max(min(state[0] - action, self.max_cars), 0) , max(min(state[1] + action, self.max_cars), 0)]
# adding reward for moving cars from one location to another (which is negative)
reward = reward + self.moving_reward * abs(action)
#there are four discrete random variables which determine the probability distribution of the reward and next state
for Aalpha in range(self.A.poisson_alp.alpha, self.A.poisson_alp.beta):
for Balpha in range(self.B.poisson_alp.alpha, self.B.poisson_alp.beta):
for Abeta in range(self.A.poisson_beta.alpha, self.A.poisson_beta.beta):
for Bbeta in range(self.B.poisson_beta.alpha, self.B.poisson_beta.beta):
"""
Aalpha : sample of cars requested at location A
Abeta : sample of cars returned at location A
Balpha : sample of cars requested at location B
Bbeta : sample of cars returned at location B
prob_event : probability of this event happening
"""
# all four variables are independent of each other
prob_event = self.A.poisson_alp.vals[Aalpha] * self.B.poisson_alp.vals[Balpha] * \
self.A.poisson_beta.vals[Abeta] * self.B.poisson_beta.vals[Bbeta]
valid_requests_A = min(new_state[0], Aalpha)
valid_requests_B = min(new_state[1], Balpha)
rew = (valid_requests_A + valid_requests_B)*(self.credit_reward)
#calculating the new state based on the values of the four random variables
new_s = [0, 0]
new_s[0] = max(min(new_state[0] - valid_requests_A + Abeta, self.max_cars), 0)
new_s[1] = max(min(new_state[1] - valid_requests_B + Bbeta, self.max_cars), 0)
#Bellman's equation
reward += prob_event * (rew + self.disc_rate * self.value[new_s[0]][new_s[1]])
return reward
def policy_evaluation(self):
# here policy_evaluation has a static variable eps whose values decreases over time
eps = self.policy_evaluation_eps
self.policy_evaluation_eps /= 10
while(1):
delta = 0
for i in range(self.value.shape[0]):
for j in range(self.value.shape[1]):
# value[i][j] denotes the value of the state [i, j]
old_val = self.value[i][j]
self.value[i][j] = self.expected_reward([i, j], self.policy[i][j])
delta = max(delta, abs(self.value[i][j] - old_val))
print('.', end = '')
sys.stdout.flush()
print(delta)
sys.stdout.flush()
if delta < eps:
break
def policy_improvement(self):
policy_stable = True
for i in range(self.value.shape[0]):
for j in range(self.value.shape[1]):
old_action = self.policy[i][j]
max_act_val = None
max_act = None
move12 = min(i, 5) # if I have say 3 cars at the first location, then I can atmost move 3 from 1 to 2
move21 = -min(j, 5) # if I have say 2 cars at the second location, then I can atmost move 2 from 2 to 1
for act in range(move21, move12+1):
exp_reward = self.expected_reward([i, j], act)
if max_act_val == None:
max_act_val = exp_reward
max_act = act
elif max_act_val < exp_reward:
max_act_val = exp_reward
max_act = act
self.policy[i][j] = max_act
if old_action != self.policy[i][j]:
policy_stable = False
return policy_stable
def run(self):
while(1):
self.policy_evaluation()
policy_stable = self.policy_improvement()
self.save_value()
self.save_policy()
if policy_stable == True:
break
def save_policy(self):
self.save_policy_counter += 1
ax = sns.heatmap(self.policy, linewidth=0.5)
ax.invert_yaxis()
plt.savefig('policy'+str(self.save_policy_counter)+'.svg')
plt.close()
def save_value(self):
self.save_value_counter += 1
ax = sns.heatmap(self.value, linewidth=0.5)
ax.invert_yaxis()
plt.savefig('value'+ str(self.save_value_counter)+'.svg')
plt.close()
def main():
jcp_obj = jcp(20, 0.9, 10, -2)
jcp_obj.run()
if __name__ == '__main__':
main() | [
[
[
134,
145
],
[
2442,
2444
],
[
2509,
2511
]
],
[
[
153,
177
],
[
7510,
7513
],
[
7577,
7580
],
[
7747,
7750
],
[
7813,
7816
]
],
[
[
185,
199
],
[
7436,
7439
],
[
7674,
7677
]
],
[
[
224,
231
],
[
594,
601
],
[
933,
940
]
],
[
[
239,
242
],
[
5815,
5818
],
[
5872,
5875
]
],
[
[
251,
258
],
[
1799,
1806
],
[
1847,
1854
]
],
[
[
1617,
1625
],
[
2273,
2281
],
[
2305,
2313
]
],
[
[
1874,
1877
],
[
7853,
7856
]
],
[
[
7831,
7835
],
[
7924,
7928
]
]
] |
from vkwave.types.responses import *
from ._category import Category
from ._utils import get_params
class Wall(Category):
async def check_copyright_link(
self, link: str, return_raw_response: bool = False,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param link:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("checkCopyrightLink", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def close_comments(
self, owner_id: int, post_id: int, return_raw_response: bool = False,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param owner_id:
:param post_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("closeComments", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def create_comment(
self,
post_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
from_group: typing.Optional[int] = None,
message: typing.Optional[str] = None,
reply_to_comment: typing.Optional[int] = None,
attachments: typing.Optional[typing.List[str]] = None,
sticker_id: typing.Optional[int] = None,
guid: typing.Optional[str] = None,
) -> typing.Union[dict, WallCreateCommentResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param post_id: - Post ID.
:param from_group: - Group ID.
:param message: - (Required if 'attachments' is not set.) Text of the comment.
:param reply_to_comment: - ID of comment to reply.
:param attachments: - (Required if 'message' is not set.) List of media objects attached to the comment, in the following format: "<owner_id>_<media_id>,<owner_id>_<media_id>", '' — Type of media ojbect: 'photo' — photo, 'video' — video, 'audio' — audio, 'doc' — document, '<owner_id>' — ID of the media owner. '<media_id>' — Media ID. For example: "photo100172_166443618,photo66748_265827614"
:param sticker_id: - Sticker ID.
:param guid: - Unique identifier to avoid repeated comments.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("createComment", params)
if return_raw_response:
return raw_result
result = WallCreateCommentResponse(**raw_result)
return result
async def delete(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
post_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param post_id: - ID of the post to be deleted.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("delete", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def delete_comment(
self,
comment_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param comment_id: - Comment ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("deleteComment", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def edit(
self,
post_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
friends_only: typing.Optional[bool] = None,
message: typing.Optional[str] = None,
attachments: typing.Optional[typing.List[str]] = None,
services: typing.Optional[str] = None,
signed: typing.Optional[bool] = None,
publish_date: typing.Optional[int] = None,
lat: typing.Optional[int] = None,
long: typing.Optional[int] = None,
place_id: typing.Optional[int] = None,
mark_as_ads: typing.Optional[bool] = None,
close_comments: typing.Optional[bool] = None,
poster_bkg_id: typing.Optional[int] = None,
poster_bkg_owner_id: typing.Optional[int] = None,
poster_bkg_access_hash: typing.Optional[str] = None,
copyright: typing.Optional[str] = None,
) -> typing.Union[dict, WallEditResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param post_id:
:param friends_only:
:param message: - (Required if 'attachments' is not set.) Text of the post.
:param attachments: - (Required if 'message' is not set.) List of objects attached to the post, in the following format: "<owner_id>_<media_id>,<owner_id>_<media_id>", '' — Type of media attachment: 'photo' — photo, 'video' — video, 'audio' — audio, 'doc' — document, '<owner_id>' — ID of the media application owner. '<media_id>' — Media application ID. Example: "photo100172_166443618,photo66748_265827614", May contain a link to an external page to include in the post. Example: "photo66748_265827614,http://habrahabr.ru", "NOTE: If more than one link is being attached, an error is thrown."
:param services:
:param signed:
:param publish_date:
:param lat:
:param long:
:param place_id:
:param mark_as_ads:
:param close_comments:
:param poster_bkg_id:
:param poster_bkg_owner_id:
:param poster_bkg_access_hash:
:param copyright:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("edit", params)
if return_raw_response:
return raw_result
result = WallEditResponse(**raw_result)
return result
async def edit_ads_stealth(
self,
post_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
message: typing.Optional[str] = None,
attachments: typing.Optional[typing.List[str]] = None,
signed: typing.Optional[BaseBoolInt] = None,
lat: typing.Optional[int] = None,
long: typing.Optional[int] = None,
place_id: typing.Optional[int] = None,
link_button: typing.Optional[str] = None,
link_title: typing.Optional[str] = None,
link_image: typing.Optional[str] = None,
link_video: typing.Optional[str] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param post_id: - Post ID. Used for publishing of scheduled and suggested posts.
:param message: - (Required if 'attachments' is not set.) Text of the post.
:param attachments: - (Required if 'message' is not set.) List of objects attached to the post, in the following format: "<owner_id>_<media_id>,<owner_id>_<media_id>", '' — Type of media attachment: 'photo' — photo, 'video' — video, 'audio' — audio, 'doc' — document, 'page' — wiki-page, 'note' — note, 'poll' — poll, 'album' — photo album, '<owner_id>' — ID of the media application owner. '<media_id>' — Media application ID. Example: "photo100172_166443618,photo66748_265827614", May contain a link to an external page to include in the post. Example: "photo66748_265827614,http://habrahabr.ru", "NOTE: If more than one link is being attached, an error will be thrown."
:param signed: - Only for posts in communities with 'from_group' set to '1': '1' — post will be signed with the name of the posting user, '0' — post will not be signed (default)
:param lat: - Geographical latitude of a check-in, in degrees (from -90 to 90).
:param long: - Geographical longitude of a check-in, in degrees (from -180 to 180).
:param place_id: - ID of the location where the user was tagged.
:param link_button: - Link button ID
:param link_title: - Link title
:param link_image: - Link image url
:param link_video: - Link video ID in format "<owner_id>_<media_id>"
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("editAdsStealth", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def edit_comment(
self,
comment_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
message: typing.Optional[str] = None,
attachments: typing.Optional[typing.List[str]] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param comment_id: - Comment ID.
:param message: - New comment text.
:param attachments: - List of objects attached to the comment, in the following format: , "<owner_id>_<media_id>,<owner_id>_<media_id>", '' — Type of media attachment: 'photo' — photo, 'video' — video, 'audio' — audio, 'doc' — document, '<owner_id>' — ID of the media attachment owner. '<media_id>' — Media attachment ID. For example: "photo100172_166443618,photo66748_265827614"
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("editComment", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def get(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
domain: typing.Optional[str] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
filter: typing.Optional[str] = None,
extended: typing.Optional[BaseBoolInt] = None,
fields: typing.Optional[typing.List[BaseUserGroupFields]] = None,
) -> typing.Union[dict, WallGetResponse, WallGetExtendedResponse]:
"""
:param owner_id: - ID of the user or community that owns the wall. By default, current user ID. Use a negative value to designate a community ID.
:param domain: - User or community short address.
:param offset: - Offset needed to return a specific subset of posts.
:param count: - Number of posts to return (maximum 100).
:param filter: - Filter to apply: 'owner' — posts by the wall owner, 'others' — posts by someone else, 'all' — posts by the wall owner and others (default), 'postponed' — timed posts (only available for calls with an 'access_token'), 'suggests' — suggested posts on a community wall
:param extended: - '1' — to return 'wall', 'profiles', and 'groups' fields, '0' — to return no additional fields (default)
:param fields:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("get", params)
if return_raw_response:
return raw_result
result = (
WallGetResponse(**raw_result)
if not extended
else WallGetExtendedResponse(**raw_result)
)
return result
async def get_by_id(
self,
posts: typing.List[str],
return_raw_response: bool = False,
extended: typing.Optional[BaseBoolInt] = None,
copy_history_depth: typing.Optional[int] = None,
fields: typing.Optional[typing.List[BaseUserGroupFields]] = None,
) -> typing.Union[dict, WallGetByIdLegacyResponse, WallGetByIdExtendedResponse]:
"""
:param posts: - User or community IDs and post IDs, separated by underscores. Use a negative value to designate a community ID. Example: "93388_21539,93388_20904,2943_4276,-1_1"
:param extended: - '1' — to return user and community objects needed to display posts, '0' — no additional fields are returned (default)
:param copy_history_depth: - Sets the number of parent elements to include in the array 'copy_history' that is returned if the post is a repost from another wall.
:param fields:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getById", params)
if return_raw_response:
return raw_result
result = (
WallGetByIdLegacyResponse(**raw_result)
if not extended
else WallGetByIdExtendedResponse(**raw_result)
)
return result
async def get_comment(
self,
comment_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
extended: typing.Optional[BaseBoolInt] = None,
fields: typing.Optional[typing.List[BaseUserGroupFields]] = None,
) -> typing.Union[dict, WallGetCommentResponse, WallGetCommentExtendedResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param comment_id: - Comment ID.
:param extended:
:param fields:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getComment", params)
if return_raw_response:
return raw_result
result = (
WallGetCommentResponse(**raw_result)
if not extended
else WallGetCommentExtendedResponse(**raw_result)
)
return result
async def get_comments(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
post_id: typing.Optional[int] = None,
need_likes: typing.Optional[BaseBoolInt] = None,
start_comment_id: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
sort: typing.Optional[str] = None,
preview_length: typing.Optional[int] = None,
extended: typing.Optional[BaseBoolInt] = None,
fields: typing.Optional[typing.List[BaseUserGroupFields]] = None,
comment_id: typing.Optional[int] = None,
thread_items_count: typing.Optional[int] = None,
) -> typing.Union[dict, WallGetCommentsResponse, WallGetCommentsExtendedResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param post_id: - Post ID.
:param need_likes: - '1' — to return the 'likes' field, '0' — not to return the 'likes' field (default)
:param start_comment_id:
:param offset: - Offset needed to return a specific subset of comments.
:param count: - Number of comments to return (maximum 100).
:param sort: - Sort order: 'asc' — chronological, 'desc' — reverse chronological
:param preview_length: - Number of characters at which to truncate comments when previewed. By default, '90'. Specify '0' if you do not want to truncate comments.
:param extended:
:param fields:
:param comment_id: - Comment ID.
:param thread_items_count: - Count items in threads.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getComments", params)
if return_raw_response:
return raw_result
result = (
WallGetCommentsResponse(**raw_result)
if not extended
else WallGetCommentsExtendedResponse(**raw_result)
)
return result
async def get_reposts(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
post_id: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
) -> typing.Union[dict, WallGetRepostsResponse]:
"""
:param owner_id: - User ID or community ID. By default, current user ID. Use a negative value to designate a community ID.
:param post_id: - Post ID.
:param offset: - Offset needed to return a specific subset of reposts.
:param count: - Number of reposts to return.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getReposts", params)
if return_raw_response:
return raw_result
result = WallGetRepostsResponse(**raw_result)
return result
async def open_comments(
self, owner_id: int, post_id: int, return_raw_response: bool = False,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param owner_id:
:param post_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("openComments", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def pin(
self,
post_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - ID of the user or community that owns the wall. By default, current user ID. Use a negative value to designate a community ID.
:param post_id: - Post ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("pin", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def post(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
friends_only: typing.Optional[BaseBoolInt] = None,
from_group: typing.Optional[BaseBoolInt] = None,
message: typing.Optional[str] = None,
attachments: typing.Optional[typing.List[str]] = None,
services: typing.Optional[str] = None,
signed: typing.Optional[BaseBoolInt] = None,
publish_date: typing.Optional[int] = None,
lat: typing.Optional[int] = None,
long: typing.Optional[int] = None,
place_id: typing.Optional[int] = None,
post_id: typing.Optional[int] = None,
guid: typing.Optional[str] = None,
mark_as_ads: typing.Optional[bool] = None,
close_comments: typing.Optional[bool] = None,
mute_notifications: typing.Optional[bool] = None,
copyright: typing.Optional[str] = None,
) -> typing.Union[dict, WallPostResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param friends_only: - '1' — post will be available to friends only, '0' — post will be available to all users (default)
:param from_group: - For a community: '1' — post will be published by the community, '0' — post will be published by the user (default)
:param message: - (Required if 'attachments' is not set.) Text of the post.
:param attachments: - (Required if 'message' is not set.) List of objects attached to the post, in the following format: "<owner_id>_<media_id>,<owner_id>_<media_id>", '' — Type of media attachment: 'photo' — photo, 'video' — video, 'audio' — audio, 'doc' — document, 'page' — wiki-page, 'note' — note, 'poll' — poll, 'album' — photo album, '<owner_id>' — ID of the media application owner. '<media_id>' — Media application ID. Example: "photo100172_166443618,photo66748_265827614", May contain a link to an external page to include in the post. Example: "photo66748_265827614,http://habrahabr.ru", "NOTE: If more than one link is being attached, an error will be thrown."
:param services: - List of services or websites the update will be exported to, if the user has so requested. Sample values: 'twitter', 'facebook'.
:param signed: - Only for posts in communities with 'from_group' set to '1': '1' — post will be signed with the name of the posting user, '0' — post will not be signed (default)
:param publish_date: - Publication date (in Unix time). If used, posting will be delayed until the set time.
:param lat: - Geographical latitude of a check-in, in degrees (from -90 to 90).
:param long: - Geographical longitude of a check-in, in degrees (from -180 to 180).
:param place_id: - ID of the location where the user was tagged.
:param post_id: - Post ID. Used for publishing of scheduled and suggested posts.
:param guid:
:param mark_as_ads:
:param close_comments:
:param mute_notifications:
:param copyright:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("post", params)
if return_raw_response:
return raw_result
result = WallPostResponse(**raw_result)
return result
async def post_ads_stealth(
self,
owner_id: int,
return_raw_response: bool = False,
message: typing.Optional[str] = None,
attachments: typing.Optional[typing.List[str]] = None,
signed: typing.Optional[BaseBoolInt] = None,
lat: typing.Optional[int] = None,
long: typing.Optional[int] = None,
place_id: typing.Optional[int] = None,
guid: typing.Optional[str] = None,
link_button: typing.Optional[str] = None,
link_title: typing.Optional[str] = None,
link_image: typing.Optional[str] = None,
link_video: typing.Optional[str] = None,
) -> typing.Union[dict, WallPostAdsStealthResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param message: - (Required if 'attachments' is not set.) Text of the post.
:param attachments: - (Required if 'message' is not set.) List of objects attached to the post, in the following format: "<owner_id>_<media_id>,<owner_id>_<media_id>", '' — Type of media attachment: 'photo' — photo, 'video' — video, 'audio' — audio, 'doc' — document, 'page' — wiki-page, 'note' — note, 'poll' — poll, 'album' — photo album, '<owner_id>' — ID of the media application owner. '<media_id>' — Media application ID. Example: "photo100172_166443618,photo66748_265827614", May contain a link to an external page to include in the post. Example: "photo66748_265827614,http://habrahabr.ru", "NOTE: If more than one link is being attached, an error will be thrown."
:param signed: - Only for posts in communities with 'from_group' set to '1': '1' — post will be signed with the name of the posting user, '0' — post will not be signed (default)
:param lat: - Geographical latitude of a check-in, in degrees (from -90 to 90).
:param long: - Geographical longitude of a check-in, in degrees (from -180 to 180).
:param place_id: - ID of the location where the user was tagged.
:param guid: - Unique identifier to avoid duplication the same post.
:param link_button: - Link button ID
:param link_title: - Link title
:param link_image: - Link image url
:param link_video: - Link video ID in format "<owner_id>_<media_id>"
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("postAdsStealth", params)
if return_raw_response:
return raw_result
result = WallPostAdsStealthResponse(**raw_result)
return result
async def report_comment(
self,
owner_id: int,
comment_id: int,
return_raw_response: bool = False,
reason: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - ID of the user or community that owns the wall.
:param comment_id: - Comment ID.
:param reason: - Reason for the complaint: '0' – spam, '1' – child pornography, '2' – extremism, '3' – violence, '4' – drug propaganda, '5' – adult material, '6' – insult, abuse
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("reportComment", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def report_post(
self,
owner_id: int,
post_id: int,
return_raw_response: bool = False,
reason: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - ID of the user or community that owns the wall.
:param post_id: - Post ID.
:param reason: - Reason for the complaint: '0' – spam, '1' – child pornography, '2' – extremism, '3' – violence, '4' – drug propaganda, '5' – adult material, '6' – insult, abuse
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("reportPost", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def repost(
self,
object: str,
return_raw_response: bool = False,
message: typing.Optional[str] = None,
group_id: typing.Optional[int] = None,
mark_as_ads: typing.Optional[bool] = None,
mute_notifications: typing.Optional[bool] = None,
) -> typing.Union[dict, WallRepostResponse]:
"""
:param object: - ID of the object to be reposted on the wall. Example: "wall66748_3675"
:param message: - Comment to be added along with the reposted object.
:param group_id: - Target community ID when reposting to a community.
:param mark_as_ads:
:param mute_notifications:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("repost", params)
if return_raw_response:
return raw_result
result = WallRepostResponse(**raw_result)
return result
async def restore(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
post_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - User ID or community ID from whose wall the post was deleted. Use a negative value to designate a community ID.
:param post_id: - ID of the post to be restored.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("restore", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def restore_comment(
self,
comment_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - User ID or community ID. Use a negative value to designate a community ID.
:param comment_id: - Comment ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("restoreComment", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def search(
self,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
domain: typing.Optional[str] = None,
query: typing.Optional[str] = None,
owners_only: typing.Optional[BaseBoolInt] = None,
count: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
extended: typing.Optional[BaseBoolInt] = None,
fields: typing.Optional[typing.List[BaseUserGroupFields]] = None,
) -> typing.Union[dict, WallSearchResponse, WallSearchExtendedResponse]:
"""
:param owner_id: - user or community id. "Remember that for a community 'owner_id' must be negative."
:param domain: - user or community screen name.
:param query: - search query string.
:param owners_only: - '1' – returns only page owner's posts.
:param count: - count of posts to return.
:param offset: - Offset needed to return a specific subset of posts.
:param extended: - show extended post info.
:param fields:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("search", params)
if return_raw_response:
return raw_result
result = (
WallSearchResponse(**raw_result)
if not extended
else WallSearchExtendedResponse(**raw_result)
)
return result
async def unpin(
self,
post_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param owner_id: - ID of the user or community that owns the wall. By default, current user ID. Use a negative value to designate a community ID.
:param post_id: - Post ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("unpin", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
| [
[
[
35,
36
],
[
229,
235
],
[
248,
264
],
[
583,
599
],
[
754,
760
],
[
773,
789
],
[
1131,
1147
],
[
1655,
1661
],
[
1674,
1699
],
[
1312,
1318
],
[
1361,
1367
],
[
1407,
1413
],
[
1462,
1468
],
[
1512,
1518
],
[
1528,
1534
],
[
1574,
1580
],
[
1617,
1623
],
[
2826,
2851
],
[
3070,
3076
],
[
3089,
3103
],
[
2986,
2992
],
[
3032,
3038
],
[
3547,
3561
],
[
3767,
3773
],
[
3786,
3800
],
[
3729,
3735
],
[
4236,
4250
],
[
5204,
5210
],
[
5223,
5239
],
[
4405,
4411
],
[
4456,
4462
],
[
4503,
4509
],
[
4553,
4559
],
[
4569,
4575
],
[
4613,
4619
],
[
4658,
4664
],
[
4710,
4716
],
[
4752,
4758
],
[
4795,
4801
],
[
4842,
4848
],
[
4892,
4898
],
[
4946,
4952
],
[
4999,
5005
],
[
5057,
5063
],
[
5118,
5124
],
[
5166,
5172
],
[
6698,
6714
],
[
7410,
7416
],
[
7429,
7443
],
[
6881,
6887
],
[
6927,
6933
],
[
6977,
6983
],
[
6993,
6999
],
[
7035,
7041
],
[
7051,
7062
],
[
7085,
7091
],
[
7128,
7134
],
[
7175,
7181
],
[
7225,
7231
],
[
7274,
7280
],
[
7323,
7329
],
[
7372,
7378
],
[
9338,
9352
],
[
9665,
9671
],
[
9684,
9698
],
[
9518,
9524
],
[
9564,
9570
],
[
9614,
9620
],
[
9630,
9636
],
[
10564,
10578
],
[
11056,
11062
],
[
11075,
11090
],
[
11092,
11115
],
[
10710,
10716
],
[
10755,
10761
],
[
10800,
10806
],
[
10844,
10850
],
[
10889,
10895
],
[
10936,
10942
],
[
10952,
10963
],
[
10989,
10995
],
[
11005,
11011
],
[
11017,
11036
],
[
12211,
12226
],
[
12286,
12309
],
[
12667,
12673
],
[
12686,
12711
],
[
12713,
12740
],
[
12411,
12417
],
[
12490,
12496
],
[
12506,
12517
],
[
12555,
12561
],
[
12600,
12606
],
[
12616,
12622
],
[
12628,
12647
],
[
13566,
13591
],
[
13651,
13678
],
[
14020,
14026
],
[
14039,
14061
],
[
14063,
14093
],
[
13853,
13859
],
[
13900,
13906
],
[
13916,
13927
],
[
13953,
13959
],
[
13969,
13975
],
[
13981,
14000
],
[
14588,
14610
],
[
14670,
14700
],
[
15467,
15473
],
[
15486,
15509
],
[
15511,
15542
],
[
14851,
14857
],
[
14897,
14903
],
[
14946,
14952
],
[
14962,
14973
],
[
15009,
15015
],
[
15054,
15060
],
[
15098,
15104
],
[
15141,
15147
],
[
15194,
15200
],
[
15241,
15247
],
[
15257,
15268
],
[
15294,
15300
],
[
15310,
15316
],
[
15322,
15341
],
[
15372,
15378
],
[
15429,
15435
],
[
16687,
16710
],
[
16770,
16801
],
[
17124,
17130
],
[
17143,
17165
],
[
16951,
16957
],
[
16997,
17003
],
[
17042,
17048
],
[
17086,
17092
],
[
17753,
17775
],
[
17929,
17935
],
[
17948,
17964
],
[
18305,
18321
],
[
18513,
18519
],
[
18532,
18546
],
[
18475,
18481
],
[
19018,
19032
],
[
20011,
20017
],
[
20030,
20046
],
[
19165,
19171
],
[
19216,
19222
],
[
19232,
19243
],
[
19273,
19279
],
[
19289,
19300
],
[
19327,
19333
],
[
19377,
19383
],
[
19393,
19399
],
[
19437,
19443
],
[
19482,
19488
],
[
19498,
19509
],
[
19541,
19547
],
[
19583,
19589
],
[
19626,
19632
],
[
19673,
19679
],
[
19719,
19725
],
[
19762,
19768
],
[
19812,
19818
],
[
19866,
19872
],
[
19924,
19930
],
[
19973,
19979
],
[
22412,
22428
],
[
23121,
23127
],
[
23140,
23166
],
[
22595,
22601
],
[
22645,
22651
],
[
22661,
22667
],
[
22703,
22709
],
[
22719,
22730
],
[
22753,
22759
],
[
22796,
22802
],
[
22843,
22849
],
[
22886,
22892
],
[
22936,
22942
],
[
22985,
22991
],
[
23034,
23040
],
[
23083,
23089
],
[
25049,
25075
],
[
25302,
25308
],
[
25321,
25335
],
[
25264,
25270
],
[
25930,
25944
],
[
26165,
26171
],
[
26184,
26198
],
[
26127,
26133
],
[
26784,
26798
],
[
27147,
27153
],
[
27166,
27184
],
[
26953,
26959
],
[
27000,
27006
],
[
27050,
27056
],
[
27108,
27114
],
[
27785,
27803
],
[
28023,
28029
],
[
28042,
28056
],
[
27939,
27945
],
[
27985,
27991
],
[
28539,
28553
],
[
28760,
28766
],
[
28779,
28793
],
[
28722,
28728
],
[
29230,
29244
],
[
29782,
29788
],
[
29801,
29819
],
[
29821,
29847
],
[
29379,
29385
],
[
29424,
29430
],
[
29468,
29474
],
[
29518,
29524
],
[
29534,
29545
],
[
29570,
29576
],
[
29615,
29621
],
[
29662,
29668
],
[
29678,
29689
],
[
29715,
29721
],
[
29731,
29737
],
[
29743,
29762
],
[
30629,
30647
],
[
30707,
30733
],
[
30937,
30943
],
[
30956,
30970
],
[
30899,
30905
],
[
31444,
31458
]
],
[
[
60,
68
],
[
113,
121
]
],
[
[
89,
99
],
[
407,
417
],
[
960,
970
],
[
2655,
2665
],
[
3383,
3393
],
[
4065,
4075
],
[
6536,
6546
],
[
9166,
9176
],
[
10395,
10405
],
[
12036,
12046
],
[
13387,
13397
],
[
14406,
14416
],
[
16504,
16514
],
[
17585,
17595
],
[
18135,
18145
],
[
18857,
18867
],
[
22250,
22260
],
[
24877,
24887
],
[
25759,
25769
],
[
26616,
26626
],
[
27621,
27631
],
[
28374,
28384
],
[
29058,
29068
],
[
30451,
30461
],
[
31281,
31291
]
],
[
[
108,
112
]
]
] |
import time
import torch
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
model = create_model(opt) # create a model given opt.model and other options
print('The number of training images = %d' % dataset_size)
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
opt.visualizer = visualizer
total_iters = 0 # the total number of training iterations
optimize_time = 0.1
times = []
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
dataset.set_epoch(epoch)
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
batch_size = data["A"].size(0)
total_iters += batch_size
epoch_iter += batch_size
torch.cuda.synchronize()
optimize_start_time = time.time()
model.set_input(data) # unpack data from dataset and apply preprocessing
if epoch == opt.epoch_count and i == 0:
model.data_dependent_initialize()
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
torch.cuda.synchronize()
optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)
if opt.display_id is None or opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
print(opt.name) # it's useful to occasionally show the experiment name on console
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
# model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
| [
[
[
7,
11
],
[
1042,
1046
],
[
1105,
1109
],
[
1543,
1547
],
[
1892,
1896
],
[
2416,
2420
],
[
3738,
3742
],
[
4140,
4144
]
],
[
[
19,
24
],
[
1833,
1838
],
[
2362,
2367
]
],
[
[
59,
71
],
[
214,
226
]
],
[
[
89,
103
],
[
276,
290
]
],
[
[
123,
135
],
[
448,
460
]
],
[
[
164,
174
],
[
603,
613
]
],
[
[
208,
211
],
[
291,
294
],
[
461,
464
],
[
614,
617
],
[
682,
685
],
[
851,
854
],
[
868,
871
],
[
883,
886
],
[
1623,
1626
],
[
2021,
2024
],
[
2127,
2130
],
[
2526,
2529
],
[
2653,
2656
],
[
2849,
2852
],
[
3107,
3110
],
[
3133,
3136
],
[
3283,
3286
],
[
3494,
3497
],
[
3628,
3631
],
[
3770,
3773
],
[
4105,
4108
],
[
4120,
4123
]
],
[
[
266,
273
],
[
380,
387
],
[
1414,
1421
],
[
1472,
1479
]
],
[
[
361,
373
],
[
571,
583
],
[
3231,
3243
]
],
[
[
440,
445
],
[
1916,
1921
],
[
2065,
2070
],
[
2115,
2120
],
[
2222,
2227
],
[
2254,
2259
],
[
2695,
2700
],
[
2770,
2775
],
[
2963,
2968
],
[
3675,
3680
],
[
3964,
3969
],
[
4181,
4186
]
],
[
[
590,
600
],
[
699,
709
],
[
1284,
1294
],
[
2735,
2745
],
[
3006,
3016
],
[
3173,
3183
]
],
[
[
714,
725
],
[
1609,
1620
],
[
1758,
1769
],
[
3938,
3949
]
],
[
[
792,
805
],
[
2482,
2495
]
],
[
[
817,
822
]
],
[
[
836,
841
],
[
1432,
1437
],
[
2012,
2017
],
[
2799,
2804
],
[
3038,
3043
],
[
3204,
3209
],
[
3451,
3456
],
[
3762,
3767
],
[
3931,
3936
],
[
4098,
4103
]
],
[
[
1023,
1039
],
[
4154,
4170
]
],
[
[
1088,
1102
],
[
1687,
1701
]
],
[
[
1167,
1177
],
[
1796,
1806
]
],
[
[
1451,
1452
],
[
2041,
2042
]
],
[
[
1454,
1458
],
[
1728,
1732
],
[
1932,
1936
]
],
[
[
1525,
1540
],
[
1669,
1684
]
],
[
[
1660,
1666
],
[
3080,
3086
]
],
[
[
1715,
1725
],
[
1773,
1783
],
[
1810,
1820
],
[
2453,
2463
]
],
[
[
1870,
1889
],
[
2430,
2449
]
],
[
[
2399,
2412
],
[
3065,
3078
],
[
2482,
2495
]
],
[
[
2625,
2636
],
[
2806,
2817
]
],
[
[
2954,
2960
],
[
3057,
3063
],
[
3245,
3251
]
],
[
[
3587,
3598
],
[
3695,
3706
]
],
[
[
3721,
3735
],
[
1687,
1701
]
]
] |
"""
WSGI config for hymn256 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hymn256.settings')
application = get_wsgi_application()
| [
[
[
230,
232
],
[
285,
287
]
],
[
[
263,
283
],
[
368,
388
]
],
[
[
354,
365
]
]
] |
#!/usr/bin/env python
import gym
import gym.spaces
import numpy as np
from PIL import Image
from copy import deepcopy
from collections import OrderedDict
import mujoco_py
from mujoco_py import MjViewer, MujocoException, const, MjRenderContextOffscreen
from safety_gym.envs.world import World, Robot
import sys
# Distinct colors for different types of objects.
# For now this is mostly used for visualization.
# This also affects the vision observation, so if training from pixels.
COLOR_CIRCLE = np.array([0, 1, 0, 1])
COLOR_RED = np.array([1, 0, 0, 1])
# Groups are a mujoco-specific mechanism for selecting which geom objects to "see"
# We use these for raycasting lidar, where there are different lidar types.
# These work by turning "on" the group to see and "off" all the other groups.
# See obs_lidar_natural() for more.
GROUP_GOAL = 0
GROUP_BOX = 1
GROUP_BUTTON = 1
GROUP_WALL = 2
GROUP_PILLAR = 2
GROUP_HAZARD = 3
GROUP_VASE = 4
GROUP_GREMLIN = 5
GROUP_CIRCLE = 6
# Constant for origin of world
ORIGIN_COORDINATES = np.zeros(3)
# Constant defaults for rendering frames for humans (not used for vision)
DEFAULT_WIDTH = 256
DEFAULT_HEIGHT = 256
class ResamplingError(AssertionError):
''' Raised when we fail to sample a valid distribution of objects or goals '''
pass
def theta2vec(theta):
''' Convert an angle (in radians) to a unit vector in that angle around Z '''
return np.array([np.cos(theta), np.sin(theta), 0.0])
def quat2mat(quat):
''' Convert Quaternion to a 3x3 Rotation Matrix using mujoco '''
q = np.array(quat, dtype='float64')
m = np.zeros(9, dtype='float64')
mujoco_py.functions.mju_quat2Mat(m, q)
return m.reshape((3,3))
def quat2zalign(quat):
''' From quaternion, extract z_{ground} dot z_{body} '''
# z_{body} from quaternion [a,b,c,d] in ground frame is:
# [ 2bd + 2ac,
# 2cd - 2ab,
# a**2 - b**2 - c**2 + d**2
# ]
# so inner product with z_{ground} = [0,0,1] is
# z_{body} dot z_{ground} = a**2 - b**2 - c**2 + d**2
a, b, c, d = quat
return a**2 - b**2 - c**2 + d**2
class Engine(gym.Env, gym.utils.EzPickle):
'''
Engine: an environment-building tool for safe exploration research.
The Engine() class entails everything to do with the tasks and safety
requirements of Safety Gym environments. An Engine() uses a World() object
to interface to MuJoCo. World() configurations are inferred from Engine()
configurations, so an environment in Safety Gym can be completely specified
by the config dict of the Engine() object.
'''
# Default configuration (this should not be nested since it gets copied)
DEFAULT = {
'name': 'SafetyGym', # Name of the env
'num_steps': 1000, # Maximum number of environment steps in an episode
'action_noise': 0.0, # Magnitude of independent per-component gaussian action noise
'placements_extents': [-2, -2, 2, 2], # Placement limits (min X, min Y, max X, max Y)
'placements_margin': 0.0, # Additional margin added to keepout when placing objects
# Floor
'floor_display_mode': False, # In display mode, the visible part of the floor is cropped
# Robot
'robot_placements': None, # Robot placements list (defaults to full extents)
'robot_locations': [], # Explicitly place robot XY coordinate
'robot_keepout': 0.4, # Needs to be set to match the robot XML used
'robot_base': 'xmls/car.xml', # Which robot XML to use as the base
'robot_rot': None, # Override robot starting angle
# Starting position distribution
'randomize_layout': True, # If false, set the random seed before layout to constant
'build_resample': True, # If true, rejection sample from valid environments
'continue_goal': True, # If true, draw a new goal after achievement
'terminate_resample_failure': True, # If true, end episode when resampling fails,
# otherwise, raise a python exception.
# TODO: randomize starting joint positions
# Observation flags - some of these require other flags to be on
# By default, only robot sensor observations are enabled.
'observation_flatten': True, # Flatten observation into a vector
'observe_sensors': True, # Observe all sensor data from simulator
'observe_goal_dist': False, # Observe the distance to the goal
'observe_goal_comp': False, # Observe a compass vector to the goal
'observe_goal_lidar': False, # Observe the goal with a lidar sensor
'observe_box_comp': False, # Observe the box with a compass
'observe_box_lidar': False, # Observe the box with a lidar
'observe_circle': False, # Observe the origin with a lidar
'observe_remaining': False, # Observe the fraction of steps remaining
'observe_walls': False, # Observe the walls with a lidar space
'observe_hazards': False, # Observe the vector from agent to hazards
'observe_sec_hazards': False, # Observe the vector from agent to secondary hazards
'observe_vases': False, # Observe the vector from agent to vases
'observe_pillars': False, # Lidar observation of pillar object positions
'observe_buttons': False, # Lidar observation of button object positions
'observe_gremlins': False, # Gremlins are observed with lidar-like space
'observe_vision': False, # Observe vision from the robot
# These next observations are unnormalized, and are only for debugging
'observe_qpos': False, # Observe the qpos of the world
'observe_qvel': False, # Observe the qvel of the robot
'observe_ctrl': False, # Observe the previous action
'observe_freejoint': False, # Observe base robot free joint
'observe_com': False, # Observe the center of mass of the robot
# Render options
'render_labels': False,
'render_lidar_markers': False,
'render_lidar_radius': 0.15,
'render_lidar_size': 0.025,
'render_lidar_offset_init': 0.5,
'render_lidar_offset_delta': 0.06,
# Vision observation parameters
'vision_size': (60, 40), # Size (width, height) of vision observation; gets flipped internally to (rows, cols) format
'vision_render': True, # Render vision observation in the viewer
'vision_render_size': (300, 200), # Size to render the vision in the viewer
'camera_name': 'vision', # Name of the camera that is used for rendering the observations (!= the rendering for human)
# Lidar observation parameters
'lidar_num_bins': 10, # Bins (around a full circle) for lidar sensing
'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)
'lidar_exp_gain': 1.0, # Scaling factor for distance in exponential distance lidar
'lidar_type': 'pseudo', # 'pseudo', 'natural', see self.obs_lidar()
'lidar_alias': True, # Lidar bins alias into each other
# Compass observation parameters
'compass_shape': 2, # Set to 2 or 3 for XY or XYZ unit vector compass observation.
# Task
'task': 'goal', # goal, button, push, x, z, circle, or none (for screenshots)
# Rewards
'add_cost_to_reward': False, # adds all costs to rewards if True
# Goal parameters
'goal_placements': None, # Placements where goal may appear (defaults to full extents)
'goal_locations': [], # Fixed locations to override placements
'goal_keepout': 0.4, # Keepout radius when placing goals
'goal_size': 0.3, # Radius of the goal area (if using task 'goal')
'goal_color': np.array([0, 1, 0, 1]), # Object color
# Box parameters (only used if task == 'push')
'box_placements': None, # Box placements list (defaults to full extents)
'box_locations': [], # Fixed locations to override placements
'box_keepout': 0.3, # Box keepout radius for placement
'box_size': 0.2, # Box half-radius size
'box_density': 0.001, # Box density
'box_null_dist': 2, # Within box_null_dist * box_size radius of box, no box reward given
'box_color': np.array([1, 1, 0, 1]), # Object color
# Reward is distance towards goal plus a constant for being within range of goal
# reward_distance should be positive to encourage moving towards the goal
# if reward_distance is 0, then the reward function is sparse
'reward_distance': 1.0, # Dense reward multiplied by the distance moved to the goal
'reward_goal': 1.0, # Sparse reward for being inside the goal area
'reward_box_dist': 1.0, # Dense reward for moving the robot towards the box
'reward_box_goal': 1.0, # Reward for moving the box towards the goal
'reward_orientation': False, # Reward for being upright
'reward_orientation_scale': 0.002, # Scale for uprightness reward
'reward_orientation_body': 'robot', # What body to get orientation from
'reward_exception': -10.0, # Reward when encoutering a mujoco exception
'reward_x': 1.0, # Reward for forward locomotion tests (vel in x direction)
'reward_z': 1.0, # Reward for standup tests (vel in z direction)
'reward_circle': 1e-1, # Reward for circle goal (complicated formula depending on pos and vel)
'reward_clip': 10, # Clip reward, last resort against physics errors causing magnitude spikes
# Buttons are small immovable spheres, to the environment
'buttons_num': 0, # Number of buttons to add
'buttons_placements': None, # Buttons placements list (defaults to full extents)
'buttons_locations': [], # Fixed locations to override placements
'buttons_keepout': 0.3, # Buttons keepout radius for placement
'buttons_size': 0.1, # Size of buttons in the scene
'buttons_cost': 1.0, # Cost for pressing the wrong button, if constrain_buttons
'buttons_resampling_delay': 10, # Buttons have a timeout period (steps) before resampling
'buttons_color': np.array([1, .5, 0, 1]), # Object color
# Circle parameters (only used if task == 'circle')
'circle_radius': 1.5,
# Sensor observations
# Specify which sensors to add to observation space
'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],
'sensors_hinge_joints': True, # Observe named joint position / velocity sensors
'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors
'sensors_angle_components': True, # Observe sin/cos theta instead of theta
# Ground Truth Observation
'observe_groundtruth': False,
'observe_groundtruth_vectors': False,
# Walls - barriers in the environment not associated with any constraint
# NOTE: this is probably best to be auto-generated than manually specified
'walls_num': 0, # Number of walls
'walls_placements': None, # This should not be used
'walls_locations': [], # This should be used and length == walls_num
'walls_keepout': 0.0, # This should not be used
'walls_size': 0.5, # Should be fixed at fundamental size of the world
'walls_color': np.array([.5, .5, .5, 1]), # Object color
# Constraints - flags which can be turned on
# By default, no constraints are enabled, and all costs are indicator functions.
'constrain_hazards': False, # Constrain robot from being in hazardous areas
'constrain_sec_hazards': False, # Constrain robot from being in secondarily hazardous areas
'constrain_vases': False, # Constrain frobot from touching objects
'constrain_pillars': False, # Immovable obstacles in the environment
'constrain_buttons': False, # Penalize pressing incorrect buttons
'constrain_gremlins': False, # Moving objects that must be avoided
'constrain_indicator': True, # If true, all costs are either 1 or 0 for a given step.
# Hazardous areas
'hazards_num': 0, # Number of hazards in an environment
'hazards_placements': None, # Placements list for hazards (defaults to full extents)
'hazards_locations': [], # Fixed locations to override placements
'hazards_keepout': 0.2, # Radius of hazard keepout for placement
'hazards_size': 0.3, # Radius of hazards
'hazards_cost': 1.0, # Cost (per step) for violating the constraint
'hazards_color': np.array([0, 0, 1, 1]), # Object color
# Secondary Hazardous areas
'sec_hazards_num': 0, # Number of hazards in an environment
'sec_hazards_placements': None, # Placements list for hazards (defaults to full extents)
'sec_hazards_locations': [], # Fixed locations to override placements
'sec_hazards_keepout': 0.2, # Radius of hazard keepout for placement
'sec_hazards_size': 0.3, # Radius of hazards
'sec_hazards_cost': 1.0, # Cost (per step) for violating the constraint
'sec_hazards_color': np.array([0, 0, 1, 1]), # Object color
# Vases (objects we should not touch)
'vases_num': 0, # Number of vases in the world
'vases_placements': None, # Vases placements list (defaults to full extents)
'vases_locations': [], # Fixed locations to override placements
'vases_keepout': 0.15, # Radius of vases keepout for placement
'vases_size': 0.1, # Half-size (radius) of vase object
'vases_density': 0.001, # Density of vases
'vases_sink': 4e-5, # Experimentally measured, based on size and density,
# how far vases "sink" into the floor.
# Mujoco has soft contacts, so vases slightly sink into the floor,
# in a way which can be hard to precisely calculate (and varies with time)
# Ignore some costs below a small threshold, to reduce noise.
'vases_contact_cost': 1.0, # Cost (per step) for being in contact with a vase
'vases_displace_cost': 0.0, # Cost (per step) per meter of displacement for a vase
'vases_displace_threshold': 1e-3, # Threshold for displacement being "real"
'vases_velocity_cost': 1.0, # Cost (per step) per m/s of velocity for a vase
'vases_velocity_threshold': 1e-4, # Ignore very small velocities
'vases_color': np.array([0, 1, 1, 1]), # Object color
# Pillars (immovable obstacles we should not touch)
'pillars_num': 0, # Number of pillars in the world
'pillars_placements': None, # Pillars placements list (defaults to full extents)
'pillars_locations': [], # Fixed locations to override placements
'pillars_keepout': 0.3, # Radius for placement of pillars
'pillars_size': 0.2, # Half-size (radius) of pillar objects
'pillars_height': 0.5, # Half-height of pillars geoms
'pillars_cost': 1.0, # Cost (per step) for being in contact with a pillar
'pillars_color': np.array([.5, .5, 1, 1]), # Object color
# Gremlins (moving objects we should avoid)
'gremlins_num': 0, # Number of gremlins in the world
'gremlins_placements': None, # Gremlins placements list (defaults to full extents)
'gremlins_locations': [], # Fixed locations to override placements
'gremlins_keepout': 0.5, # Radius for keeping out (contains gremlin path)
'gremlins_travel': 0.3, # Radius of the circle traveled in
'gremlins_size': 0.1, # Half-size (radius) of gremlin objects
'gremlins_density': 0.001, # Density of gremlins
'gremlins_contact_cost': 1.0, # Cost for touching a gremlin
'gremlins_dist_threshold': 0.2, # Threshold for cost for being too close
'gremlins_dist_cost': 1.0, # Cost for being within distance threshold
'gremlins_color': np.array([0.5, 0, 1, 1]), # Object color
# Frameskip is the number of physics simulation steps per environment step
# Frameskip is sampled as a binomial distribution
# For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)
'frameskip_binom_n': 10, # Number of draws trials in binomial distribution (max frameskip)
'frameskip_binom_p': 1.0, # Probability of trial return (controls distribution)
'_seed': None, # Random state seed (avoid name conflict with self.seed)
}
def __init__(self, config={}):
# First, parse configuration. Important note: LOTS of stuff happens in
# parse, and many attributes of the class get set through setattr. If you
# are trying to track down where an attribute gets initially set, and
# can't find it anywhere else, it's probably set via the config dict
# and this parse function.
self.parse(config)
gym.utils.EzPickle.__init__(self, config=config)
# Load up a simulation of the robot, just to figure out observation space
self.robot = Robot(self.robot_base)
self.action_space = gym.spaces.Box(-1, 1, (self.robot.nu,), dtype=np.float32)
self.build_observation_space()
self.build_placements_dict()
self.viewer = None
self.world = None
self.clear()
self.seed(self._seed)
self.done = True
def parse(self, config):
''' Parse a config dict - see self.DEFAULT for description '''
self.config = deepcopy(self.DEFAULT)
self.config.update(deepcopy(config))
for key, value in self.config.items():
assert key in self.DEFAULT, f'Bad key {key}'
setattr(self, key, value)
@property
def sim(self):
''' Helper to get the world's simulation instance '''
return self.world.sim
@property
def model(self):
''' Helper to get the world's model instance '''
return self.sim.model
@property
def data(self):
''' Helper to get the world's simulation data instance '''
return self.sim.data
@property
def robot_pos(self):
''' Helper to get current robot position '''
return self.data.get_body_xpos('robot').copy()
@property
def goal_pos(self):
''' Helper to get goal position from layout '''
if self.task in ['goal', 'push']:
return self.data.get_body_xpos('goal').copy()
elif self.task == 'button':
return self.data.get_body_xpos(f'button{self.goal_button}').copy()
elif self.task == 'circle':
return ORIGIN_COORDINATES
elif self.task == 'none':
return np.zeros(2) # Only used for screenshots
else:
raise ValueError(f'Invalid task {self.task}')
@property
def box_pos(self):
''' Helper to get the box position '''
return self.data.get_body_xpos('box').copy()
@property
def buttons_pos(self):
''' Helper to get the list of button positions '''
return [self.data.get_body_xpos(f'button{i}').copy() for i in range(self.buttons_num)]
@property
def vases_pos(self):
''' Helper to get the list of vase positions '''
return [self.data.get_body_xpos(f'vase{p}').copy() for p in range(self.vases_num)]
@property
def vases_velp(self):
''' Helper to get the list of vase positions '''
return [self.data.get_body_xvelp(f'vase{p}').copy() for p in range(self.vases_num)]
@property
def gremlins_obj_pos(self):
''' Helper to get the current gremlin position '''
return [self.data.get_body_xpos(f'gremlin{i}obj').copy() for i in range(self.gremlins_num)]
@property
def gremlins_obj_velp(self):
''' Helper to get the current gremlin position '''
return [self.data.get_body_xvelp(f'gremlin{i}obj').copy() for i in range(self.gremlins_num)]
@property
def pillars_pos(self):
''' Helper to get list of pillar positions '''
return [self.data.get_body_xpos(f'pillar{i}').copy() for i in range(self.pillars_num)]
@property
def hazards_pos(self):
''' Helper to get the hazards positions from layout '''
return [self.data.get_body_xpos(f'hazard{i}').copy() for i in range(self.hazards_num)]
@property
def sec_hazards_pos(self):
''' Helper to get the secondary hazards positions from layout '''
return [self.data.get_body_xpos(f'sec_hazard{i}').copy() for i in range(self.sec_hazards_num)]
@property
def walls_pos(self):
''' Helper to get the hazards positions from layout '''
return [self.data.get_body_xpos(f'wall{i}').copy() for i in range(self.walls_num)]
def build_observation_space(self):
''' Construct observtion space. Happens only once at during __init__ '''
obs_space_dict = OrderedDict() # See self.obs()
if self.observe_freejoint:
obs_space_dict['freejoint'] = gym.spaces.Box(-np.inf, np.inf, (7,), dtype=np.float32)
if self.observe_com:
obs_space_dict['com'] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)
if self.observe_sensors:
for sensor in self.sensors_obs: # Explicitly listed sensors
dim = self.robot.sensor_dim[sensor]
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (dim,), dtype=np.float32)
# Velocities don't have wraparound effects that rotational positions do
# Wraparounds are not kind to neural networks
# Whereas the angle 2*pi is very close to 0, this isn't true in the network
# In theory the network could learn this, but in practice we simplify it
# when the sensors_angle_components switch is enabled.
for sensor in self.robot.hinge_vel_names:
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float32)
for sensor in self.robot.ballangvel_names:
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)
# Angular positions have wraparound effects, so output something more friendly
if self.sensors_angle_components:
# Single joints are turned into sin(x), cos(x) pairs
# These should be easier to learn for neural networks,
# Since for angles, small perturbations in angle give small differences in sin/cos
for sensor in self.robot.hinge_pos_names:
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (2,), dtype=np.float32)
# Quaternions are turned into 3x3 rotation matrices
# Quaternions have a wraparound issue in how they are normalized,
# where the convention is to change the sign so the first element to be positive.
# If the first element is close to 0, this can mean small differences in rotation
# lead to large differences in value as the latter elements change sign.
# This also means that the first element of the quaternion is not expectation zero.
# The SO(3) rotation representation would be a good replacement here,
# since it smoothly varies between values in all directions (the property we want),
# but right now we have very little code to support SO(3) roatations.
# Instead we use a 3x3 rotation matrix, which if normalized, smoothly varies as well.
for sensor in self.robot.ballquat_names:
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (3, 3), dtype=np.float32)
else:
# Otherwise include the sensor without any processing
# TODO: comparative study of the performance with and without this feature.
for sensor in self.robot.hinge_pos_names:
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float32)
for sensor in self.robot.ballquat_names:
obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (4,), dtype=np.float32)
if self.task == 'push':
if self.observe_box_comp:
obs_space_dict['box_compass'] = gym.spaces.Box(-1.0, 1.0, (self.compass_shape,), dtype=np.float32)
if self.observe_box_lidar:
obs_space_dict['box_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.observe_goal_dist:
obs_space_dict['goal_dist'] = gym.spaces.Box(0.0, 1.0, (1,), dtype=np.float32)
if self.observe_goal_comp:
obs_space_dict['goal_compass'] = gym.spaces.Box(-1.0, 1.0, (self.compass_shape,), dtype=np.float32)
if self.observe_goal_lidar:
obs_space_dict['goal_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.task == 'circle' and self.observe_circle:
obs_space_dict['circle_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.observe_remaining:
obs_space_dict['remaining'] = gym.spaces.Box(0.0, 1.0, (1,), dtype=np.float32)
if self.walls_num and self.observe_walls:
obs_space_dict['walls_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.observe_hazards:
obs_space_dict['hazards_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.observe_sec_hazards:
obs_space_dict['sec_hazards_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.observe_vases:
obs_space_dict['vases_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.gremlins_num and self.observe_gremlins:
obs_space_dict['gremlins_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.pillars_num and self.observe_pillars:
obs_space_dict['pillars_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.buttons_num and self.observe_buttons:
obs_space_dict['buttons_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)
if self.observe_qpos:
obs_space_dict['qpos'] = gym.spaces.Box(-np.inf, np.inf, (self.robot.nq,), dtype=np.float32)
if self.observe_qvel:
obs_space_dict['qvel'] = gym.spaces.Box(-np.inf, np.inf, (self.robot.nv,), dtype=np.float32)
if self.observe_ctrl:
obs_space_dict['ctrl'] = gym.spaces.Box(-np.inf, np.inf, (self.robot.nu,), dtype=np.float32)
if self.observe_vision:
width, height = self.vision_size
rows, cols = height, width
self.vision_size = (rows, cols)
obs_space_dict['vision'] = gym.spaces.Box(0, 1.0, (3,) + self.vision_size, dtype=np.float32)
if self.observe_groundtruth:
obs_space_dict['robot_gt_pos'] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)
obs_space_dict['goal_gt_pos'] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)
if self.hazards_num > 0:
obs_space_dict['hazards_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.hazards_num * 3,), dtype=np.float32)
if self.sec_hazards_num > 0:
obs_space_dict['sec_hazards_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.sec_hazards_num * 3,), dtype=np.float32)
if self.vases_num > 0:
obs_space_dict['vases_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.vases_num * (3 + 9),), dtype=np.float32)
if self.pillars_num > 0:
obs_space_dict['pillars_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.pillars_num * 3,), dtype=np.float32)
if self.gremlins_num > 0:
obs_space_dict['gremlins_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.gremlins_num * (3 + 9),), dtype=np.float32)
if self.buttons_num > 0:
obs_space_dict['buttons_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.buttons_num * 3,), dtype=np.float32)
if self.observe_groundtruth_vectors:
num_objects = 6
obs_space_dict['vision'] = gym.spaces.Box(-np.inf, np.inf, (2 + self.hazards_num + self.sec_hazards_num + self.vases_num +
self.pillars_num, num_objects + 3), dtype=np.float32)
# Flatten it ourselves
self.obs_space_dict = obs_space_dict
if self.observation_flatten:
self.obs_flat_size = sum([np.prod(i.shape) for i in self.obs_space_dict.values()])
self.observation_space = gym.spaces.Box(-np.inf, np.inf, (self.obs_flat_size,), dtype=np.float32)
else:
self.observation_space = gym.spaces.Dict(obs_space_dict)
def toggle_observation_space(self):
self.observation_flatten = not(self.observation_flatten)
self.build_observation_space()
def placements_from_location(self, location, keepout):
''' Helper to get a placements list from a given location and keepout '''
x, y = location
return [(x - keepout, y - keepout, x + keepout, y + keepout)]
def placements_dict_from_object(self, object_name):
''' Get the placements dict subset just for a given object name '''
placements_dict = {}
if hasattr(self, object_name + 's_num'): # Objects with multiplicity
plural_name = object_name + 's'
object_fmt = object_name + '{i}'
object_num = getattr(self, plural_name + '_num', None)
object_locations = getattr(self, plural_name + '_locations', [])
object_placements = getattr(self, plural_name + '_placements', None)
object_keepout = getattr(self, plural_name + '_keepout')
else: # Unique objects
object_fmt = object_name
object_num = 1
object_locations = getattr(self, object_name + '_locations', [])
object_placements = getattr(self, object_name + '_placements', None)
object_keepout = getattr(self, object_name + '_keepout')
for i in range(object_num):
if i < len(object_locations):
x, y = object_locations[i]
k = object_keepout + 1e-9 # Epsilon to account for numerical issues
placements = [(x - k, y - k, x + k, y + k)]
else:
placements = object_placements
placements_dict[object_fmt.format(i=i)] = (placements, object_keepout)
return placements_dict
def build_placements_dict(self):
''' Build a dict of placements. Happens once during __init__. '''
# Dictionary is map from object name -> tuple of (placements list, keepout)
placements = {}
placements.update(self.placements_dict_from_object('robot'))
placements.update(self.placements_dict_from_object('wall'))
if self.task in ['goal', 'push']:
placements.update(self.placements_dict_from_object('goal'))
if self.task == 'push':
placements.update(self.placements_dict_from_object('box'))
if self.task == 'button' or self.buttons_num: #self.constrain_buttons:
placements.update(self.placements_dict_from_object('button'))
if self.hazards_num: #self.constrain_hazards:
placements.update(self.placements_dict_from_object('hazard'))
if self.sec_hazards_num: #self.constrain_hazards:
placements.update(self.placements_dict_from_object('sec_hazard'))
if self.vases_num: #self.constrain_vases:
placements.update(self.placements_dict_from_object('vase'))
if self.pillars_num: #self.constrain_pillars:
placements.update(self.placements_dict_from_object('pillar'))
if self.gremlins_num: #self.constrain_gremlins:
placements.update(self.placements_dict_from_object('gremlin'))
self.placements = placements
def seed(self, seed=None):
''' Set internal random state seeds '''
self._seed = np.random.randint(2**32) if seed is None else seed
self.rs = np.random.RandomState(self._seed)
def build_layout(self):
''' Rejection sample a placement of objects to find a layout. '''
if not self.randomize_layout:
self.rs = np.random.RandomState(0)
for _ in range(10000):
if self.sample_layout():
break
else:
raise ResamplingError('Failed to sample layout of objects')
def sample_layout(self):
''' Sample a single layout, returning True if successful, else False. '''
def placement_is_valid(xy, layout):
for other_name, other_xy in layout.items():
other_keepout = self.placements[other_name][1]
dist = np.sqrt(np.sum(np.square(xy - other_xy)))
if dist < other_keepout + self.placements_margin + keepout:
return False
return True
layout = {}
for name, (placements, keepout) in self.placements.items():
conflicted = True
for _ in range(100):
xy = self.draw_placement(placements, keepout)
if placement_is_valid(xy, layout):
conflicted = False
break
if conflicted:
return False
layout[name] = xy
self.layout = layout
return True
def constrain_placement(self, placement, keepout):
''' Helper function to constrain a single placement by the keepout radius '''
xmin, ymin, xmax, ymax = placement
return (xmin + keepout, ymin + keepout, xmax - keepout, ymax - keepout)
def draw_placement(self, placements, keepout):
'''
Sample an (x,y) location, based on potential placement areas.
Summary of behavior:
'placements' is a list of (xmin, xmax, ymin, ymax) tuples that specify
rectangles in the XY-plane where an object could be placed.
'keepout' describes how much space an object is required to have
around it, where that keepout space overlaps with the placement rectangle.
To sample an (x,y) pair, first randomly select which placement rectangle
to sample from, where the probability of a rectangle is weighted by its
area. If the rectangles are disjoint, there's an equal chance the (x,y)
location will wind up anywhere in the placement space. If they overlap, then
overlap areas are double-counted and will have higher density. This allows
the user some flexibility in building placement distributions. Finally,
randomly draw a uniform point within the selected rectangle.
'''
if placements is None:
choice = self.constrain_placement(self.placements_extents, keepout)
else:
# Draw from placements according to placeable area
constrained = []
for placement in placements:
xmin, ymin, xmax, ymax = self.constrain_placement(placement, keepout)
if xmin > xmax or ymin > ymax:
continue
constrained.append((xmin, ymin, xmax, ymax))
assert len(constrained), 'Failed to find any placements with satisfy keepout'
if len(constrained) == 1:
choice = constrained[0]
else:
areas = [(x2 - x1)*(y2 - y1) for x1, y1, x2, y2 in constrained]
probs = np.array(areas) / np.sum(areas)
choice = constrained[self.rs.choice(len(constrained), p=probs)]
xmin, ymin, xmax, ymax = choice
return np.array([self.rs.uniform(xmin, xmax), self.rs.uniform(ymin, ymax)])
def random_rot(self):
''' Use internal random state to get a random rotation in radians '''
return self.rs.uniform(0, 2 * np.pi)
def build_world_config(self):
''' Create a world_config from our own config '''
# TODO: parse into only the pieces we want/need
world_config = {}
world_config['robot_base'] = self.robot_base
world_config['robot_xy'] = self.layout['robot']
if self.robot_rot is None:
world_config['robot_rot'] = self.random_rot()
else:
world_config['robot_rot'] = float(self.robot_rot)
if self.floor_display_mode:
floor_size = max(self.placements_extents)
world_config['floor_size'] = [floor_size + .1, floor_size + .1, 1]
#if not self.observe_vision:
# world_config['render_context'] = -1 # Hijack this so we don't create context
world_config['observe_vision'] = self.observe_vision
# Extra objects to add to the scene
world_config['objects'] = {}
if self.vases_num:
for i in range(self.vases_num):
name = f'vase{i}'
object = {'name': name,
'size': np.ones(3) * self.vases_size,
'type': 'box',
'density': self.vases_density,
'pos': np.r_[self.layout[name], self.vases_size - self.vases_sink],
'rot': self.random_rot(),
'group': GROUP_VASE,
'rgba': self.vases_color}
world_config['objects'][name] = object
if self.gremlins_num:
self._gremlins_rots = dict()
for i in range(self.gremlins_num):
name = f'gremlin{i}obj'
self._gremlins_rots[i] = self.random_rot()
object = {'name': name,
'size': np.ones(3) * self.gremlins_size,
'type': 'box',
'density': self.gremlins_density,
'pos': np.r_[self.layout[name.replace('obj', '')], self.gremlins_size],
'rot': self._gremlins_rots[i],
'group': GROUP_GREMLIN,
'rgba': self.gremlins_color}
world_config['objects'][name] = object
if self.task == 'push':
object = {'name': 'box',
'type': 'box',
'size': np.ones(3) * self.box_size,
'pos': np.r_[self.layout['box'], self.box_size],
'rot': self.random_rot(),
'density': self.box_density,
'group': GROUP_BOX,
'rgba': self.box_color}
world_config['objects']['box'] = object
# Extra geoms (immovable objects) to add to the scene
world_config['geoms'] = {}
if self.task in ['goal', 'push']:
geom = {'name': 'goal',
'size': [self.goal_size, self.goal_size / 2],
'pos': np.r_[self.layout['goal'], self.goal_size / 2 + 1e-2],
'rot': self.random_rot(),
'type': 'cylinder',
'contype': 0,
'conaffinity': 0,
'group': GROUP_GOAL,
'rgba': self.goal_color}
world_config['geoms']['goal'] = geom
if self.hazards_num:
for i in range(self.hazards_num):
name = f'hazard{i}'
geom = {'name': name,
'size': [self.hazards_size, 1e-2],#self.hazards_size / 2],
'pos': np.r_[self.layout[name], 2e-2],#self.hazards_size / 2 + 1e-2],
'rot': self.random_rot(),
'type': 'cylinder',
'contype': 0,
'conaffinity': 0,
'group': GROUP_HAZARD,
'rgba': self.hazards_color}
world_config['geoms'][name] = geom
if self.sec_hazards_num:
for i in range(self.sec_hazards_num):
name = f'sec_hazard{i}'
geom = {'name': name,
'size': [self.sec_hazards_size, 1e-2],#self.sec_hazards_size / 2],
'pos': np.r_[self.layout[name], 2e-2],#self.sec_hazards_size / 2 + 1e-2],
'rot': self.random_rot(),
'type': 'cylinder',
'contype': 0,
'conaffinity': 0,
'group': GROUP_HAZARD,
'rgba': self.sec_hazards_color}
world_config['geoms'][name] = geom
if self.pillars_num:
for i in range(self.pillars_num):
name = f'pillar{i}'
geom = {'name': name,
'size': [self.pillars_size, self.pillars_height],
'pos': np.r_[self.layout[name], self.pillars_height],
'rot': self.random_rot(),
'type': 'cylinder',
'group': GROUP_PILLAR,
'rgba': self.pillars_color}
world_config['geoms'][name] = geom
if self.walls_num:
for i in range(self.walls_num):
name = f'wall{i}'
geom = {'name': name,
'size': np.ones(3) * self.walls_size,
'pos': np.r_[self.layout[name], self.walls_size],
'rot': 0,
'type': 'box',
'group': GROUP_WALL,
'rgba': self.walls_color}
world_config['geoms'][name] = geom
if self.buttons_num:
for i in range(self.buttons_num):
name = f'button{i}'
geom = {'name': name,
'size': np.ones(3) * self.buttons_size,
'pos': np.r_[self.layout[name], self.buttons_size],
'rot': self.random_rot(),
'type': 'sphere',
'group': GROUP_BUTTON,
'rgba': self.buttons_color}
world_config['geoms'][name] = geom
if self.task == 'circle':
geom = {'name': 'circle',
'size': np.array([self.circle_radius, 1e-2]),
'pos': np.array([0, 0, 2e-2]),
'rot': 0,
'type': 'cylinder',
'contype': 0,
'conaffinity': 0,
'group': GROUP_CIRCLE,
'rgba': COLOR_CIRCLE}
world_config['geoms']['circle'] = geom
# Extra mocap bodies used for control (equality to object of same name)
world_config['mocaps'] = {}
if self.gremlins_num:
for i in range(self.gremlins_num):
name = f'gremlin{i}mocap'
mocap = {'name': name,
'size': np.ones(3) * self.gremlins_size,
'type': 'box',
'pos': np.r_[self.layout[name.replace('mocap', '')], self.gremlins_size],
'rot': self._gremlins_rots[i],
'group': GROUP_GREMLIN,
'rgba': self.gremlins_color}
#'rgba': np.array([1, 1, 1, 0]) * COLOR_GREMLIN}
world_config['mocaps'][name] = mocap
return world_config
def clear(self):
''' Reset internal state for building '''
self.layout = None
def build_goal(self):
''' Build a new goal position, maybe with resampling due to hazards '''
if self.task == 'goal':
self.build_goal_position()
self.last_dist_goal = self.dist_goal()
elif self.task == 'push':
self.build_goal_position()
self.last_dist_goal = self.dist_goal()
self.last_dist_box = self.dist_box()
self.last_box_goal = self.dist_box_goal()
elif self.task == 'button':
assert self.buttons_num > 0, 'Must have at least one button'
self.build_goal_button()
self.last_dist_goal = self.dist_goal()
elif self.task in ['x', 'z']:
self.last_robot_com = self.world.robot_com()
elif self.task in ['circle', 'none']:
pass
else:
raise ValueError(f'Invalid task {self.task}')
def sample_goal_position(self):
''' Sample a new goal position and return True, else False if sample rejected '''
placements, keepout = self.placements['goal']
goal_xy = self.draw_placement(placements, keepout)
for other_name, other_xy in self.layout.items():
other_keepout = self.placements[other_name][1]
dist = np.sqrt(np.sum(np.square(goal_xy - other_xy)))
if dist < other_keepout + self.placements_margin + keepout:
return False
self.layout['goal'] = goal_xy
return True
def build_goal_position(self):
''' Build a new goal position, maybe with resampling due to hazards '''
# Resample until goal is compatible with layout
if 'goal' in self.layout:
del self.layout['goal']
for _ in range(10000): # Retries
if self.sample_goal_position():
break
else:
raise ResamplingError('Failed to generate goal')
# Move goal geom to new layout position
self.world_config_dict['geoms']['goal']['pos'][:2] = self.layout['goal']
#self.world.rebuild(deepcopy(self.world_config_dict))
#self.update_viewer_sim = True
goal_body_id = self.sim.model.body_name2id('goal')
self.sim.model.body_pos[goal_body_id][:2] = self.layout['goal']
self.sim.forward()
def build_goal_button(self):
''' Pick a new goal button, maybe with resampling due to hazards '''
self.goal_button = self.rs.choice(self.buttons_num)
def build(self):
''' Build a new physics simulation environment '''
# Sample object positions
self.build_layout()
# Build the underlying physics world
self.world_config_dict = self.build_world_config()
if self.world is None:
self.world = World(self.world_config_dict)
self.world.reset()
self.world.build()
else:
self.world.reset(build=False)
self.world.rebuild(self.world_config_dict, state=False)
# Redo a small amount of work, and setup initial goal state
self.build_goal()
# Save last action
self.last_action = np.zeros(self.action_space.shape)
# Save last subtree center of mass
self.last_subtreecom = self.world.get_sensor('subtreecom')
def reset(self):
''' Reset the physics simulation and return observation '''
# self._seed += 1 # Increment seed
# self.rs = np.random.RandomState(self._seed)
self.done = False
self.steps = 0 # Count of steps taken in this episode
# Set the button timer to zero (so button is immediately visible)
self.buttons_timer = 0
self.clear()
self.build()
# Save the layout at reset
self.reset_layout = deepcopy(self.layout)
cost = self.cost()
assert cost['cost'] == 0, f'World has starting cost! {cost}'
# Reset stateful parts of the environment
self.first_reset = False # Built our first world successfully
# Return an observation
return self.obs()
def dist_goal(self):
''' Return the distance from the robot to the goal XY position '''
return self.dist_xy(self.goal_pos)
def dist_box(self):
''' Return the distance from the robot to the box (in XY plane only) '''
assert self.task == 'push', f'invalid task {self.task}'
return np.sqrt(np.sum(np.square(self.box_pos - self.world.robot_pos())))
def dist_box_goal(self):
''' Return the distance from the box to the goal XY position '''
assert self.task == 'push', f'invalid task {self.task}'
return np.sqrt(np.sum(np.square(self.box_pos - self.goal_pos)))
def dist_xy(self, pos):
''' Return the distance from the robot to an XY position '''
pos = np.asarray(pos)
if pos.shape == (3,):
pos = pos[:2]
robot_pos = self.world.robot_pos()
return np.sqrt(np.sum(np.square(pos - robot_pos[:2])))
def world_xy(self, pos):
''' Return the world XY vector to a position from the robot '''
assert pos.shape == (2,)
return pos - self.world.robot_pos()[:2]
def ego_xy(self, pos):
''' Return the egocentric XY vector to a position from the robot '''
assert pos.shape == (2,), f'Bad pos {pos}'
robot_3vec = self.world.robot_pos()
robot_mat = self.world.robot_mat()
pos_3vec = np.concatenate([pos, [0]]) # Add a zero z-coordinate
world_3vec = pos_3vec - robot_3vec
return np.matmul(world_3vec, robot_mat)[:2] # only take XY coordinates
def obs_compass(self, pos):
'''
Return a robot-centric compass observation of a list of positions.
Compass is a normalized (unit-lenght) egocentric XY vector,
from the agent to the object.
This is equivalent to observing the egocentric XY angle to the target,
projected into the sin/cos space we use for joints.
(See comment on joint observation for why we do this.)
'''
pos = np.asarray(pos)
if pos.shape == (2,):
pos = np.concatenate([pos, [0]]) # Add a zero z-coordinate
# Get ego vector in world frame
vec = pos - self.world.robot_pos()
# Rotate into frame
vec = np.matmul(vec, self.world.robot_mat())
# Truncate
vec = vec[:self.compass_shape]
# Normalize
vec /= np.sqrt(np.sum(np.square(vec))) + 0.001
assert vec.shape == (self.compass_shape,), f'Bad vec {vec}'
return vec
def obs_vision(self):
''' Return pixels from the robot camera '''
# Get a render context so we can
rows, cols = self.vision_size
width, height = cols, rows
vision = self.sim.render(width, height, camera_name=self.camera_name, mode='offscreen')
vision = np.array(vision, dtype='float32')[::-1, :, :] / 255
return np.transpose(vision, (2, 0, 1))
def obs_lidar(self, positions, group):
'''
Calculate and return a lidar observation. See sub methods for implementation.
'''
if self.lidar_type == 'pseudo':
return self.obs_lidar_pseudo(positions)
elif self.lidar_type == 'natural':
return self.obs_lidar_natural(group)
else:
raise ValueError(f'Invalid lidar_type {self.lidar_type}')
def obs_lidar_natural(self, group):
'''
Natural lidar casts rays based on the ego-frame of the robot.
Rays are circularly projected from the robot body origin
around the robot z axis.
'''
body = self.model.body_name2id('robot')
grp = np.asarray([i == group for i in range(int(const.NGROUP))], dtype='uint8')
pos = np.asarray(self.world.robot_pos(), dtype='float64')
mat_t = self.world.robot_mat()
obs = np.zeros(self.lidar_num_bins)
for i in range(self.lidar_num_bins):
theta = (i / self.lidar_num_bins) * np.pi * 2
vec = np.matmul(mat_t, theta2vec(theta)) # Rotate from ego to world frame
vec = np.asarray(vec, dtype='float64')
dist, _ = self.sim.ray_fast_group(pos, vec, grp, 1, body)
if dist >= 0:
obs[i] = np.exp(-dist)
return obs
def obs_lidar_pseudo(self, positions):
'''
Return a robot-centric lidar observation of a list of positions.
Lidar is a set of bins around the robot (divided evenly in a circle).
The detection directions are exclusive and exhaustive for a full 360 view.
Each bin reads 0 if there are no objects in that direction.
If there are multiple objects, the distance to the closest one is used.
Otherwise the bin reads the fraction of the distance towards the robot.
E.g. if the object is 90% of lidar_max_dist away, the bin will read 0.1,
and if the object is 10% of lidar_max_dist away, the bin will read 0.9.
(The reading can be thought of as "closeness" or inverse distance)
This encoding has some desirable properties:
- bins read 0 when empty
- bins smoothly increase as objects get close
- maximum reading is 1.0 (where the object overlaps the robot)
- close objects occlude far objects
- constant size observation with variable numbers of objects
'''
obs = np.zeros(self.lidar_num_bins)
for pos in positions:
pos = np.asarray(pos)
if pos.shape == (3,):
pos = pos[:2] # Truncate Z coordinate
z = np.complex(*self.ego_xy(pos)) # X, Y as real, imaginary components
dist = np.abs(z)
angle = np.angle(z) % (np.pi * 2)
bin_size = (np.pi * 2) / self.lidar_num_bins
bin = int(angle / bin_size)
bin_angle = bin_size * bin
if self.lidar_max_dist is None:
sensor = np.exp(-self.lidar_exp_gain * dist)
else:
sensor = max(0, self.lidar_max_dist - dist) / self.lidar_max_dist
obs[bin] = max(obs[bin], sensor)
# Aliasing
if self.lidar_alias:
alias = (angle - bin_angle) / bin_size
assert 0 <= alias <= 1, f'bad alias {alias}, dist {dist}, angle {angle}, bin {bin}'
bin_plus = (bin + 1) % self.lidar_num_bins
bin_minus = (bin - 1) % self.lidar_num_bins
obs[bin_plus] = max(obs[bin_plus], alias * sensor)
obs[bin_minus] = max(obs[bin_minus], (1 - alias) * sensor)
return obs
def obs(self):
''' Return the observation of our agent '''
self.sim.forward() # Needed to get sensordata correct
obs = {}
if self.observe_goal_dist:
obs['goal_dist'] = np.array([np.exp(-self.dist_goal())])
if self.observe_goal_comp:
obs['goal_compass'] = self.obs_compass(self.goal_pos)
if self.observe_goal_lidar:
obs['goal_lidar'] = self.obs_lidar([self.goal_pos], GROUP_GOAL)
if self.task == 'push':
box_pos = self.box_pos
if self.observe_box_comp:
obs['box_compass'] = self.obs_compass(box_pos)
if self.observe_box_lidar:
obs['box_lidar'] = self.obs_lidar([box_pos], GROUP_BOX)
if self.task == 'circle' and self.observe_circle:
obs['circle_lidar'] = self.obs_lidar([self.goal_pos], GROUP_CIRCLE)
if self.observe_freejoint:
joint_id = self.model.joint_name2id('robot')
joint_qposadr = self.model.jnt_qposadr[joint_id]
assert joint_qposadr == 0 # Needs to be the first entry in qpos
obs['freejoint'] = self.data.qpos[:7]
if self.observe_com:
obs['com'] = self.world.robot_com()
if self.observe_sensors:
# Sensors which can be read directly, without processing
for sensor in self.sensors_obs: # Explicitly listed sensors
obs[sensor] = self.world.get_sensor(sensor)
for sensor in self.robot.hinge_vel_names:
obs[sensor] = self.world.get_sensor(sensor)
for sensor in self.robot.ballangvel_names:
obs[sensor] = self.world.get_sensor(sensor)
# Process angular position sensors
if self.sensors_angle_components:
for sensor in self.robot.hinge_pos_names:
theta = float(self.world.get_sensor(sensor)) # Ensure not 1D, 1-element array
obs[sensor] = np.array([np.sin(theta), np.cos(theta)])
for sensor in self.robot.ballquat_names:
quat = self.world.get_sensor(sensor)
obs[sensor] = quat2mat(quat)
else: # Otherwise read sensors directly
for sensor in self.robot.hinge_pos_names:
obs[sensor] = self.world.get_sensor(sensor)
for sensor in self.robot.ballquat_names:
obs[sensor] = self.world.get_sensor(sensor)
if self.observe_remaining:
obs['remaining'] = np.array([self.steps / self.num_steps])
assert 0.0 <= obs['remaining'][0] <= 1.0, 'bad remaining {}'.format(obs['remaining'])
if self.walls_num and self.observe_walls:
obs['walls_lidar'] = self.obs_lidar(self.walls_pos, GROUP_WALL)
if self.observe_hazards:
obs['hazards_lidar'] = self.obs_lidar(self.hazards_pos, GROUP_HAZARD)
if self.observe_sec_hazards:
obs['sec_hazards_lidar'] = self.obs_lidar(self.sec_hazards_pos, GROUP_HAZARD)
if self.observe_vases:
obs['vases_lidar'] = self.obs_lidar(self.vases_pos, GROUP_VASE)
if self.gremlins_num and self.observe_gremlins:
obs['gremlins_lidar'] = self.obs_lidar(self.gremlins_obj_pos, GROUP_GREMLIN)
if self.pillars_num and self.observe_pillars:
obs['pillars_lidar'] = self.obs_lidar(self.pillars_pos, GROUP_PILLAR)
if self.buttons_num and self.observe_buttons:
# Buttons observation is zero while buttons are resetting
if self.buttons_timer == 0:
obs['buttons_lidar'] = self.obs_lidar(self.buttons_pos, GROUP_BUTTON)
else:
obs['buttons_lidar'] = np.zeros(self.lidar_num_bins)
if self.observe_qpos:
obs['qpos'] = self.data.qpos.copy()
if self.observe_qvel:
obs['qvel'] = self.data.qvel.copy()
if self.observe_ctrl:
obs['ctrl'] = self.data.ctrl.copy()
if self.observe_vision:
obs['vision'] = self.obs_vision()
if self.observe_groundtruth:
obs['robot_gt_pos'] = self.robot_pos
obs['goal_gt_pos'] = self.goal_pos
if self.hazards_num > 0:
obs['hazards_gt'] = np.array(self.hazards_pos).flatten()
if self.sec_hazards_num > 0:
obs['sec_hazards_gt'] = np.array(self.sec_hazards_pos).flatten()
if self.vases_num > 0:
vases_velp = np.reshape(self.vases_velp, (self.vases_num, -1))
vases_gt = np.concatenate([self.vases_pos, vases_velp], axis=-1)
obs['vases_gt'] = vases_gt.flatten()
if self.pillars_num > 0:
obs['pillars_gt'] = np.array(self.pillars_pos).flatten()
if self.gremlins_num > 0:
gremlins_velp = np.reshape(self.gremlins_obj_velp, (self.gremlins_num, -1))
gremlins_gt = np.concatenate([self.gremlins_obj_pos, gremlins_velp], axis=-1)
obs['gremlins_gt'] = gremlins_gt.flatten()
if self.buttons_num > 0:
obs['buttons_gt'] = np.array(self.buttons_pos).flatten()
if self.observe_groundtruth_vectors:
num_objects = 6 # number of all constrainable objects
obs['vision'] = []
robot_gt = np.zeros((1, num_objects))
robot_gt[:, 0] = 1.
robot_gt = np.concatenate([robot_gt, np.expand_dims(np.array(self.robot_pos), axis=0)], axis=-1)
obs['vision'].append(robot_gt)
goal_gt = np.zeros((1, num_objects))
goal_gt[:, 1] = 1.
goal_gt = np.concatenate([goal_gt, np.expand_dims(np.array(self.goal_pos), axis=0)], axis=-1)
obs['vision'].append(goal_gt)
if self.hazards_num > 0:
hazards_gt = np.zeros((self.hazards_num, num_objects))
hazards_gt[:, 2] = 1.
hazards_gt = np.concatenate([hazards_gt, self.hazards_pos], axis=-1)
obs['vision'].append(hazards_gt)
if self.sec_hazards_num > 0:
sec_hazards_gt = np.zeros((self.sec_hazards_num, num_objects))
sec_hazards_gt[:, 3] = 1.
sec_hazards_gt = np.concatenate([sec_hazards_gt, self.sec_hazards_pos], axis=1)
obs['vision'].append(sec_hazards_gt)
if self.vases_num > 0:
vases_gt = np.zeros((self.vases_num, num_objects))
vases_gt[:, 4] = 1.
vases_gt = np.concatenate([vases_gt, self.vases_pos], axis=-1)
obs['vision'].append(vases_gt)
if self.pillars_num > 0:
pillars_gt = np.zeros((self.pillars_num, num_objects))
pillars_gt[:, 5] = 1.
pillars_gt = np.concatenate([pillars_gt, self.pillars_pos], axis=-1)
obs['vision'].append(pillars_gt)
# shuffle object representations
obs['vision'] = np.concatenate(obs['vision'], axis=0)
shuffle_idx = self.rs.rand(obs['vision'].shape[0]).argsort()
obs['vision'] = obs['vision'][shuffle_idx]
if self.observation_flatten:
flat_obs = np.zeros(self.obs_flat_size)
offset = 0
for k in sorted(self.obs_space_dict.keys()):
k_size = np.prod(obs[k].shape)
flat_obs[offset:offset + k_size] = obs[k].flat
offset += k_size
obs = flat_obs
assert self.observation_space.contains(obs), f'Bad obs {obs} {self.observation_space}'
return obs
def cost(self):
''' Calculate the current costs and return a dict '''
self.sim.forward() # Ensure positions and contacts are correct
cost = {}
# Conctacts processing
if self.constrain_vases:
cost['cost_vases_contact'] = 0
if self.constrain_pillars:
cost['cost_pillars'] = 0
if self.constrain_buttons:
cost['cost_buttons'] = 0
if self.constrain_gremlins:
cost['cost_gremlins'] = 0
buttons_constraints_active = self.constrain_buttons and (self.buttons_timer == 0)
for contact in self.data.contact[:self.data.ncon]:
geom_ids = [contact.geom1, contact.geom2]
geom_names = sorted([self.model.geom_id2name(g) for g in geom_ids])
if self.constrain_vases and any(n.startswith('vase') for n in geom_names):
if any(n in self.robot.geom_names for n in geom_names):
cost['cost_vases_contact'] += self.vases_contact_cost
if self.constrain_pillars and any(n.startswith('pillar') for n in geom_names):
if any(n in self.robot.geom_names for n in geom_names):
cost['cost_pillars'] += self.pillars_cost
if buttons_constraints_active and any(n.startswith('button') for n in geom_names):
if any(n in self.robot.geom_names for n in geom_names):
if not any(n == f'button{self.goal_button}' for n in geom_names):
cost['cost_buttons'] += self.buttons_cost
if self.constrain_gremlins and any(n.startswith('gremlin') for n in geom_names):
if any(n in self.robot.geom_names for n in geom_names):
cost['cost_gremlins'] += self.gremlins_contact_cost
# Displacement processing
if self.constrain_vases and self.vases_displace_cost:
cost['cost_vases_displace'] = 0
for i in range(self.vases_num):
name = f'vase{i}'
dist = np.sqrt(np.sum(np.square(self.data.get_body_xpos(name)[:2] - self.reset_layout[name])))
if dist > self.vases_displace_threshold:
cost['cost_vases_displace'] += dist * self.vases_displace_cost
# Velocity processing
if self.constrain_vases and self.vases_velocity_cost:
# TODO: penalize rotational velocity too, but requires another cost coefficient
cost['cost_vases_velocity'] = 0
for i in range(self.vases_num):
name = f'vase{i}'
vel = np.sqrt(np.sum(np.square(self.data.get_body_xvelp(name))))
if vel >= self.vases_velocity_threshold:
cost['cost_vases_velocity'] += vel * self.vases_velocity_cost
# Calculate constraint violations
if self.constrain_hazards:
cost['cost_hazards'] = 0
for h_pos in self.hazards_pos:
h_dist = self.dist_xy(h_pos)
if h_dist <= self.hazards_size:
cost['cost_hazards'] += self.hazards_cost # * (self.hazards_size - h_dist)
if self.constrain_sec_hazards:
cost['cost_sec_hazards'] = 0
for h_pos in self.sec_hazards_pos:
h_dist = self.dist_xy(h_pos)
if h_dist <= self.sec_hazards_size:
cost['cost_sec_hazards'] += self.sec_hazards_cost
# Sum all costs into single total cost
cost['cost'] = sum(v for k, v in cost.items() if k.startswith('cost_'))
# Optionally remove shaping from reward functions.
if self.constrain_indicator:
for k in list(cost.keys()):
cost[k] = float(cost[k] > 0.0) # Indicator function
self._cost = cost
return cost
def goal_met(self):
''' Return true if the current goal is met this step '''
if self.task == 'goal':
return self.dist_goal() <= self.goal_size #+ 0.08 # TODO remove 0.08
if self.task == 'push':
return self.dist_box_goal() <= self.goal_size
if self.task == 'button':
for contact in self.data.contact[:self.data.ncon]:
geom_ids = [contact.geom1, contact.geom2]
geom_names = sorted([self.model.geom_id2name(g) for g in geom_ids])
if any(n == f'button{self.goal_button}' for n in geom_names):
if any(n in self.robot.geom_names for n in geom_names):
return True
return False
if self.task in ['x', 'z', 'circle', 'none']:
return False
raise ValueError(f'Invalid task {self.task}')
def set_mocaps(self):
''' Set mocap object positions before a physics step is executed '''
if self.gremlins_num: # self.constrain_gremlins:
phase = float(self.data.time)
for i in range(self.gremlins_num):
name = f'gremlin{i}'
target = np.array([np.sin(phase), np.cos(phase)]) * self.gremlins_travel
pos = np.r_[target, [self.gremlins_size]]
self.data.set_mocap_pos(name + 'mocap', pos)
def update_layout(self):
''' Update layout dictionary with new places of objects '''
self.sim.forward()
for k in list(self.layout.keys()):
# Mocap objects have to be handled separately
if 'gremlin' in k:
continue
self.layout[k] = self.data.get_body_xpos(k)[:2].copy()
def buttons_timer_tick(self):
''' Tick the buttons resampling timer '''
self.buttons_timer = max(0, self.buttons_timer - 1)
def step(self, action):
''' Take a step and return observation, reward, done, and info '''
action = np.array(action, copy=False) # Cast to ndarray
assert not self.done, 'Environment must be reset before stepping'
info = {}
# Set action
action_range = self.model.actuator_ctrlrange
# action_scale = action_range[:,1] - action_range[:, 0]
self.data.ctrl[:] = np.clip(action, action_range[:,0], action_range[:,1]) #np.clip(action * 2 / action_scale, -1, 1)
if self.action_noise:
self.data.ctrl[:] += self.action_noise * self.rs.randn(self.model.nu)
# Simulate physics forward
exception = False
for _ in range(self.rs.binomial(self.frameskip_binom_n, self.frameskip_binom_p)):
try:
self.set_mocaps()
self.sim.step() # Physics simulation step
except MujocoException as me:
print('MujocoException', me)
exception = True
break
if exception:
self.done = True
reward = self.reward_exception
info['cost_exception'] = 1.0
else:
self.sim.forward() # Needed to get sensor readings correct!
# Reward processing
reward = self.reward()
# Constraint violations
info.update(self.cost())
# Button timer (used to delay button resampling)
self.buttons_timer_tick()
# Goal processing
if self.goal_met():
info['goal_met'] = True
reward += self.reward_goal
if self.continue_goal:
# Update the internal layout so we can correctly resample (given objects have moved)
self.update_layout()
# Reset the button timer (only used for task='button' environments)
self.buttons_timer = self.buttons_resampling_delay
# Try to build a new goal, end if we fail
if self.terminate_resample_failure:
try:
self.build_goal()
except ResamplingError as e:
# Normal end of episode
self.done = True
else:
# Try to make a goal, which could raise a ResamplingError exception
self.build_goal()
else:
self.done = True
else:
info['goal_met'] = False
# Timeout
self.steps += 1
if self.steps >= self.num_steps:
self.done = True # Maximum number of steps in an episode reached
if self.add_cost_to_reward:
reward -= info['cost']
return self.obs(), reward, self.done, info
def reward(self):
''' Calculate the dense component of reward. Call exactly once per step '''
reward = 0.0
# Distance from robot to goal
if self.task in ['goal', 'button']:
dist_goal = self.dist_goal()
reward += (self.last_dist_goal - dist_goal) * self.reward_distance
self.last_dist_goal = dist_goal
# Distance from robot to box
if self.task == 'push':
dist_box = self.dist_box()
gate_dist_box_reward = (self.last_dist_box > self.box_null_dist * self.box_size)
reward += (self.last_dist_box - dist_box) * self.reward_box_dist * gate_dist_box_reward
self.last_dist_box = dist_box
# Distance from box to goal
if self.task == 'push':
dist_box_goal = self.dist_box_goal()
reward += (self.last_box_goal - dist_box_goal) * self.reward_box_goal
self.last_box_goal = dist_box_goal
# Used for forward locomotion tests
if self.task == 'x':
robot_com = self.world.robot_com()
reward += (robot_com[0] - self.last_robot_com[0]) * self.reward_x
self.last_robot_com = robot_com
# Used for jump up tests
if self.task == 'z':
robot_com = self.world.robot_com()
reward += (robot_com[2] - self.last_robot_com[2]) * self.reward_z
self.last_robot_com = robot_com
# Circle environment reward
if self.task == 'circle':
robot_com = self.world.robot_com()
robot_vel = self.world.robot_vel()
x, y, _ = robot_com
u, v, _ = robot_vel
radius = np.sqrt(x**2 + y**2)
reward += (((-u*y + v*x)/radius)/(1 + np.abs(radius - self.circle_radius))) * self.reward_circle
# Intrinsic reward for uprightness
if self.reward_orientation:
zalign = quat2zalign(self.data.get_body_xquat(self.reward_orientation_body))
reward += self.reward_orientation_scale * zalign
# Clip reward
if self.reward_clip:
in_range = reward < self.reward_clip and reward > -self.reward_clip
if not(in_range):
reward = np.clip(reward, -self.reward_clip, self.reward_clip)
print('Warning: reward was outside of range!')
return reward
def render_lidar(self, poses, color, offset, group):
''' Render the lidar observation '''
robot_pos = self.world.robot_pos()
robot_mat = self.world.robot_mat()
lidar = self.obs_lidar(poses, group)
for i, sensor in enumerate(lidar):
if self.lidar_type == 'pseudo':
i += 0.5 # Offset to center of bin
theta = 2 * np.pi * i / self.lidar_num_bins
rad = self.render_lidar_radius
binpos = np.array([np.cos(theta) * rad, np.sin(theta) * rad, offset])
pos = robot_pos + np.matmul(binpos, robot_mat.transpose())
alpha = min(1, sensor + .1)
self.viewer.add_marker(pos=pos,
size=self.render_lidar_size * np.ones(3),
type=const.GEOM_SPHERE,
rgba=np.array(color) * alpha,
label='')
def render_compass(self, pose, color, offset):
''' Render a compass observation '''
robot_pos = self.world.robot_pos()
robot_mat = self.world.robot_mat()
# Truncate the compass to only visualize XY component
compass = np.concatenate([self.obs_compass(pose)[:2] * 0.15, [offset]])
pos = robot_pos + np.matmul(compass, robot_mat.transpose())
self.viewer.add_marker(pos=pos,
size=.05 * np.ones(3),
type=const.GEOM_SPHERE,
rgba=np.array(color) * 0.5,
label='')
def render_area(self, pos, size, color, label='', alpha=0.1):
''' Render a radial area in the environment '''
z_size = min(size, 0.3)
pos = np.asarray(pos)
if pos.shape == (2,):
pos = np.r_[pos, 0] # Z coordinate 0
self.viewer.add_marker(pos=pos,
size=[size, size, z_size],
type=const.GEOM_CYLINDER,
rgba=np.array(color) * alpha,
label=label if self.render_labels else '')
def render_sphere(self, pos, size, color, label='', alpha=0.1):
''' Render a radial area in the environment '''
pos = np.asarray(pos)
if pos.shape == (2,):
pos = np.r_[pos, 0] # Z coordinate 0
self.viewer.add_marker(pos=pos,
size=size * np.ones(3),
type=const.GEOM_SPHERE,
rgba=np.array(color) * alpha,
label=label if self.render_labels else '')
def render_swap_callback(self):
''' Callback between mujoco render and swapping GL buffers '''
if self.observe_vision and self.vision_render:
self.viewer.draw_pixels(self.save_obs_vision, 0, 0)
def render(self,
mode='human',
camera_id=None,
width=DEFAULT_WIDTH,
height=DEFAULT_HEIGHT
):
''' Render the environment to the screen '''
if self.viewer is None or mode!=self._old_render_mode:
# Set camera if specified
if mode == 'human':
self.viewer = MjViewer(self.sim)
self.viewer.cam.fixedcamid = -1
self.viewer.cam.type = const.CAMERA_FREE
else:
self.viewer = MjRenderContextOffscreen(self.sim)
self.viewer._hide_overlay = True
self.viewer.cam.fixedcamid = camera_id #self.model.camera_name2id(mode)
self.viewer.cam.type = const.CAMERA_FIXED
self.viewer.render_swap_callback = self.render_swap_callback
# Turn all the geom groups on
self.viewer.vopt.geomgroup[:] = 1
self._old_render_mode = mode
self.viewer.update_sim(self.sim)
if camera_id is not None:
# Update camera if desired
self.viewer.cam.fixedcamid = camera_id
# Lidar markers
if self.render_lidar_markers:
offset = self.render_lidar_offset_init # Height offset for successive lidar indicators
if 'box_lidar' in self.obs_space_dict or 'box_compass' in self.obs_space_dict:
if 'box_lidar' in self.obs_space_dict:
self.render_lidar([self.box_pos], self.box_color, offset, GROUP_BOX)
if 'box_compass' in self.obs_space_dict:
self.render_compass(self.box_pos, self.box_color, offset)
offset += self.render_lidar_offset_delta
if 'goal_lidar' in self.obs_space_dict or 'goal_compass' in self.obs_space_dict:
if 'goal_lidar' in self.obs_space_dict:
self.render_lidar([self.goal_pos], self.goal_color, offset, GROUP_GOAL)
if 'goal_compass' in self.obs_space_dict:
self.render_compass(self.goal_pos, self.goal_color, offset)
offset += self.render_lidar_offset_delta
if 'buttons_lidar' in self.obs_space_dict:
self.render_lidar(self.buttons_pos, self.buttons_color, offset, GROUP_BUTTON)
offset += self.render_lidar_offset_delta
if 'circle_lidar' in self.obs_space_dict:
self.render_lidar([ORIGIN_COORDINATES], COLOR_CIRCLE, offset, GROUP_CIRCLE)
offset += self.render_lidar_offset_delta
if 'walls_lidar' in self.obs_space_dict:
self.render_lidar(self.walls_pos, self.walls_color, offset, GROUP_WALL)
offset += self.render_lidar_offset_delta
if 'hazards_lidar' in self.obs_space_dict:
self.render_lidar(self.hazards_pos, self.hazards_color, offset, GROUP_HAZARD)
offset += self.render_lidar_offset_delta
if 'sec_hazards_lidar' in self.obs_space_dict:
self.render_lidar(self.sec_hazards_pos, self.sec_hazards_color, offset, GROUP_HAZARD)
offset += self.render_lidar_offset_delta
if 'pillars_lidar' in self.obs_space_dict:
self.render_lidar(self.pillars_pos, self.pillars_color, offset, GROUP_PILLAR)
offset += self.render_lidar_offset_delta
if 'gremlins_lidar' in self.obs_space_dict:
self.render_lidar(self.gremlins_obj_pos, self.gremlins_color, offset, GROUP_GREMLIN)
offset += self.render_lidar_offset_delta
if 'vases_lidar' in self.obs_space_dict:
self.render_lidar(self.vases_pos, self.vases_color, offset, GROUP_VASE)
offset += self.render_lidar_offset_delta
# Add goal marker
if self.task == 'button':
self.render_area(self.goal_pos, self.buttons_size * 2, self.buttons_color, 'goal', alpha=0.1)
# Add indicator for nonzero cost
if self._cost.get('cost', 0) > 0:
self.render_sphere(self.world.robot_pos(), 0.25, COLOR_RED, alpha=.5)
# Draw vision pixels
if self.observe_vision and self.vision_render:
vision = self.obs_vision()
vision = np.array(vision * 255, dtype='uint8')
vision = Image.fromarray(vision).resize(self.vision_render_size)
vision = np.array(vision, dtype='uint8')
self.save_obs_vision = vision
if mode=='human':
self.viewer.render()
elif mode=='rgb_array':
self.viewer.render(width, height)
data = self.viewer.read_pixels(width, height, depth=False)
self.viewer._markers[:] = []
self.viewer._overlay.clear()
return data[::-1, :, :] | [
[
[
30,
33
]
],
[
[
41,
51
],
[
2104,
2107
],
[
2113,
2116
],
[
17054,
17057
],
[
17259,
17262
],
[
21129,
21132
],
[
21250,
21253
],
[
21505,
21508
],
[
22040,
22043
],
[
22192,
22195
],
[
22727,
22730
],
[
23794,
23797
],
[
24135,
24138
],
[
24293,
24296
],
[
24467,
24470
],
[
24619,
24622
],
[
24763,
24766
],
[
24892,
24895
],
[
25038,
25041
],
[
25208,
25211
],
[
25352,
25355
],
[
25495,
25498
],
[
25641,
25644
],
[
25795,
25798
],
[
25937,
25940
],
[
26107,
26110
],
[
26274,
26277
],
[
26441,
26444
],
[
26575,
26578
],
[
26710,
26713
],
[
26845,
26848
],
[
27112,
27115
],
[
27260,
27263
],
[
27360,
27363
],
[
27500,
27503
],
[
27667,
27670
],
[
27826,
27829
],
[
27989,
27992
],
[
28150,
28153
],
[
28316,
28319
],
[
28503,
28506
],
[
28971,
28974
],
[
29095,
29098
]
],
[
[
59,
70
],
[
501,
503
],
[
536,
538
],
[
1031,
1033
],
[
7811,
7813
],
[
8336,
8338
],
[
10250,
10252
],
[
11445,
11447
],
[
12704,
12706
],
[
13269,
13271
],
[
14585,
14587
],
[
15218,
15220
],
[
16079,
16081
],
[
1408,
1410
],
[
1418,
1420
],
[
1433,
1435
],
[
1553,
1555
],
[
1593,
1595
],
[
17305,
17307
],
[
18822,
18824
],
[
21145,
21147
],
[
21153,
21155
],
[
21173,
21175
],
[
21266,
21268
],
[
21274,
21276
],
[
21294,
21296
],
[
21521,
21523
],
[
21529,
21531
],
[
21551,
21553
],
[
22056,
22058
],
[
22064,
22066
],
[
22084,
22086
],
[
22208,
22210
],
[
22216,
22218
],
[
22236,
22238
],
[
22743,
22745
],
[
22751,
22753
],
[
22771,
22773
],
[
23810,
23812
],
[
23818,
23820
],
[
23840,
23842
],
[
24151,
24153
],
[
24159,
24161
],
[
24179,
24181
],
[
24309,
24311
],
[
24317,
24319
],
[
24337,
24339
],
[
24522,
24524
],
[
24674,
24676
],
[
24800,
24802
],
[
24947,
24949
],
[
25093,
25095
],
[
25263,
25265
],
[
25389,
25391
],
[
25550,
25552
],
[
25696,
25698
],
[
25850,
25852
],
[
25992,
25994
],
[
26162,
26164
],
[
26329,
26331
],
[
26496,
26498
],
[
26591,
26593
],
[
26599,
26601
],
[
26631,
26633
],
[
26726,
26728
],
[
26734,
26736
],
[
26766,
26768
],
[
26861,
26863
],
[
26869,
26871
],
[
26901,
26903
],
[
27166,
27168
],
[
27276,
27278
],
[
27284,
27286
],
[
27304,
27306
],
[
27376,
27378
],
[
27384,
27386
],
[
27404,
27406
],
[
27516,
27518
],
[
27524,
27526
],
[
27563,
27565
],
[
27683,
27685
],
[
27691,
27693
],
[
27734,
27736
],
[
27842,
27844
],
[
27850,
27852
],
[
27893,
27895
],
[
28005,
28007
],
[
28013,
28015
],
[
28052,
28054
],
[
28166,
28168
],
[
28174,
28176
],
[
28220,
28222
],
[
28332,
28334
],
[
28340,
28342
],
[
28379,
28381
],
[
28519,
28521
],
[
28527,
28529
],
[
28713,
28715
],
[
28877,
28879
],
[
28987,
28989
],
[
28995,
28997
],
[
29032,
29034
],
[
32413,
32415
],
[
32482,
32484
],
[
32679,
32681
],
[
35903,
35905
],
[
35921,
35923
],
[
36070,
36072
],
[
36282,
36284
],
[
37364,
37366
],
[
37525,
37527
],
[
38083,
38085
],
[
38250,
38252
],
[
38668,
38670
],
[
38725,
38727
],
[
39275,
39277
],
[
39886,
39888
],
[
40556,
40558
],
[
41205,
41207
],
[
41671,
41673
],
[
41732,
41734
],
[
42175,
42177
],
[
42238,
42240
],
[
42625,
42627
],
[
42690,
42692
],
[
43301,
43303
],
[
43406,
43408
],
[
45145,
45147
],
[
45153,
45155
],
[
45160,
45162
],
[
47005,
47007
],
[
48267,
48269
],
[
48275,
48277
],
[
48282,
48284
],
[
48515,
48517
],
[
48523,
48525
],
[
48530,
48532
],
[
48684,
48686
],
[
48814,
48816
],
[
48822,
48824
],
[
48829,
48831
],
[
49307,
49309
],
[
49419,
49421
],
[
49940,
49942
],
[
50004,
50006
],
[
50183,
50185
],
[
50315,
50317
],
[
50323,
50325
],
[
50330,
50332
],
[
50748,
50750
],
[
50815,
50817
],
[
51565,
51567
],
[
51653,
51655
],
[
51758,
51760
],
[
51881,
51883
],
[
51909,
51911
],
[
51996,
51998
],
[
52150,
52152
],
[
53310,
53312
],
[
53388,
53390
],
[
53509,
53511
],
[
53596,
53598
],
[
53626,
53628
],
[
53641,
53643
],
[
53676,
53678
],
[
53857,
53859
],
[
54748,
54750
],
[
54758,
54760
],
[
56521,
56523
],
[
56531,
56533
],
[
56546,
56548
],
[
57087,
57089
],
[
58288,
58290
],
[
58836,
58838
],
[
58954,
58956
],
[
59059,
59061
],
[
59136,
59138
],
[
59316,
59318
],
[
59423,
59425
],
[
59513,
59515
],
[
59709,
59711
],
[
59913,
59915
],
[
59995,
59997
],
[
60021,
60023
],
[
60036,
60038
],
[
60147,
60149
],
[
60227,
60229
],
[
60252,
60254
],
[
60267,
60269
],
[
60420,
60422
],
[
60529,
60531
],
[
60708,
60710
],
[
60829,
60831
],
[
61007,
61009
],
[
61110,
61112
],
[
61275,
61277
],
[
61384,
61386
],
[
61563,
61565
],
[
61790,
61792
],
[
61924,
61926
],
[
64220,
64222
],
[
64228,
64230
],
[
64235,
64237
],
[
64777,
64779
],
[
64785,
64787
],
[
64792,
64794
],
[
67192,
67194
],
[
67202,
67204
],
[
67217,
67219
],
[
67278,
67280
],
[
67990,
67992
],
[
68298,
68300
],
[
72459,
72461
],
[
72530,
72532
],
[
73004,
73006
],
[
73539,
73541
],
[
73635,
73637
],
[
73645,
73647
],
[
73666,
73668
],
[
73726,
73728
],
[
73916,
73918
],
[
74027,
74029
],
[
74360,
74362
],
[
74448,
74450
],
[
74572,
74574
],
[
74675,
74677
],
[
74908,
74910
],
[
74972,
74974
],
[
75195,
75197
],
[
75433,
75435
],
[
75497,
75499
],
[
75612,
75614
],
[
75715,
75717
],
[
80355,
80357
],
[
80491,
80493
],
[
33180,
33182
],
[
33188,
33190
],
[
33195,
33197
]
],
[
[
87,
92
],
[
80414,
80419
]
],
[
[
110,
118
],
[
17647,
17655
],
[
17697,
17705
],
[
47638,
47646
]
],
[
[
143,
154
],
[
21019,
21030
]
],
[
[
162,
171
],
[
1626,
1635
]
],
[
[
194,
202
],
[
76432,
76440
]
],
[
[
204,
219
],
[
68788,
68803
]
],
[
[
221,
226
],
[
51607,
51612
],
[
73968,
73973
],
[
74620,
74625
],
[
75138,
75143
],
[
75660,
75665
],
[
76538,
76543
],
[
76815,
76820
]
],
[
[
228,
252
],
[
76604,
76628
]
],
[
[
288,
293
],
[
46640,
46645
]
],
[
[
295,
300
],
[
17207,
17212
]
],
[
[
309,
312
]
],
[
[
486,
498
],
[
42927,
42939
],
[
78544,
78556
]
],
[
[
524,
533
],
[
80189,
80198
]
],
[
[
833,
843
],
[
39517,
39527
],
[
54987,
54997
],
[
78021,
78031
]
],
[
[
848,
857
],
[
38897,
38906
],
[
55267,
55276
],
[
77589,
77598
]
],
[
[
862,
874
],
[
42408,
42420
],
[
58217,
58229
],
[
78363,
78375
]
],
[
[
879,
889
],
[
41881,
41891
],
[
57339,
57349
],
[
78766,
78776
]
],
[
[
894,
906
],
[
41379,
41391
],
[
57967,
57979
],
[
79394,
79406
]
],
[
[
911,
923
],
[
40156,
40168
],
[
40830,
40842
],
[
57452,
57464
],
[
57579,
57591
],
[
78970,
78982
],
[
79188,
79200
]
],
[
[
928,
938
],
[
37673,
37683
],
[
57688,
57698
],
[
79808,
79818
]
],
[
[
943,
956
],
[
38407,
38420
],
[
43563,
43576
],
[
57830,
57843
],
[
79607,
79620
]
],
[
[
961,
973
],
[
42885,
42897
],
[
55402,
55414
],
[
78566,
78578
]
],
[
[
1010,
1028
],
[
18750,
18768
],
[
78523,
78541
]
],
[
[
1118,
1131
],
[
76145,
76158
]
],
[
[
1138,
1152
],
[
76182,
76196
]
],
[
[
1166,
1181
],
[
32827,
32842
],
[
45733,
45748
],
[
70089,
70104
]
],
[
[
1297,
1306
],
[
51926,
51935
]
],
[
[
1460,
1468
],
[
56710,
56718
]
],
[
[
1699,
1710
],
[
72689,
72700
]
],
[
[
2097,
2103
]
]
] |
"""Run an example script to quickly test any MyQ account."""
import asyncio
from aiohttp import ClientSession
import pymyq
from pymyq.errors import MyQError
async def main() -> None:
"""Create the aiohttp session and run the example."""
async with ClientSession() as websession:
try:
myq = await pymyq.login(
'<EMAIL>', '<PASSWORD>', '<BRAND>', websession)
devices = await myq.get_devices()
for idx, device in enumerate(devices):
print('Device #{0}: {1}'.format(idx + 1, device.name))
print('--------')
print('Brand: {0}'.format(device.brand))
print('Type: {0}'.format(device.type))
print('Serial: {0}'.format(device.serial))
print('Device ID: {0}'.format(device.device_id))
print('Parent ID: {0}'.format(device.parent_id))
print('Current State: {0}'.format(device.state))
print()
print('Opening the device...')
await device.open()
print('Current State: {0}'.format(device.state))
await asyncio.sleep(15)
print('Closing the device...')
await device.close()
print('Current State: {0}'.format(device.state))
except MyQError as err:
print(err)
asyncio.get_event_loop().run_until_complete(main())
| [
[
[
68,
75
],
[
1392,
1399
],
[
1168,
1175
]
],
[
[
97,
110
],
[
260,
273
]
],
[
[
119,
124
],
[
328,
333
]
],
[
[
150,
158
],
[
1350,
1358
]
],
[
[
161,
1389
],
[
1436,
1440
]
]
] |
#!/usr/bin/env python
from ansible.module_utils.basic import *
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cisco_ucs_ldap_provider_group
short_description: configures ldap provider group on a cisco ucs server
version_added: 0.9.0.0
description:
- configures ldap provider group on a cisco ucs server
options:
state:
description:
- if C(present), will perform create/add/enable operation
- if C(absent), will perform delete/remove/disable operation
required: false
choices: ['present', 'absent']
default: "present"
name:
version_added: "1.4(1i)"
description: ldap provider group name
required: true
descr:
version_added: "1.4(1i)"
description: ldap provider group description
required: false
requirements: ['ucsmsdk', 'ucsm_apis']
author: "Cisco Systems Inc(ucs-python@cisco.com)"
'''
EXAMPLES = '''
- name:
cisco_ucs_ldap_provider_group:
name: "test_ldap_provider_group"
descr: "description"
state: "present"
ucs_ip: "192.168.1.1"
ucs_username: "admin"
ucs_password: "password"
'''
def _argument_mo():
return dict(
name=dict(required=True, type='str'),
descr=dict(type='str'),
)
def _argument_custom():
return dict(
state=dict(default="present",
choices=['present', 'absent'],
type='str'),
)
def _argument_connection():
return dict(
# UcsHandle
ucs_server=dict(type='dict'),
# Ucs server credentials
ucs_ip=dict(type='str'),
ucs_username=dict(default="admin", type='str'),
ucs_password=dict(type='str', no_log=True),
ucs_port=dict(default=None),
ucs_secure=dict(default=None),
ucs_proxy=dict(default=None)
)
def _ansible_module_create():
argument_spec = dict()
argument_spec.update(_argument_mo())
argument_spec.update(_argument_custom())
argument_spec.update(_argument_connection())
return AnsibleModule(argument_spec,
supports_check_mode=True)
def _get_mo_params(params):
from ansible.module_utils.cisco_ucs import UcsConnection
args = {}
for key in _argument_mo():
if params.get(key) is None:
continue
args[key] = params.get(key)
return args
def setup_ldap_provider_group(server, module):
from ucsm_apis.admin.ldap import ldap_provider_group_create
from ucsm_apis.admin.ldap import ldap_provider_group_delete
from ucsm_apis.admin.ldap import ldap_provider_group_exists
ansible = module.params
args_mo = _get_mo_params(ansible)
exists, mo = ldap_provider_group_exists(handle=server, **args_mo)
if ansible["state"] == "present":
if module.check_mode or exists:
return not exists
ldap_provider_group_create(handle=server, **args_mo)
else:
if module.check_mode or not exists:
return exists
ldap_provider_group_delete(server, mo.name)
return True
def setup(server, module):
result = {}
err = False
try:
result["changed"] = setup_ldap_provider_group(server, module)
except Exception as e:
err = True
result["msg"] = "setup error: %s " % str(e)
result["changed"] = False
return result, err
def main():
from ansible.module_utils.cisco_ucs import UcsConnection
module = _ansible_module_create()
conn = UcsConnection(module)
server = conn.login()
result, err = setup(server, module)
conn.logout()
if err:
module.fail_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| [
[
[
62,
63
],
[
2170,
2183
]
],
[
[
65,
81
]
],
[
[
206,
219
]
],
[
[
1032,
1040
]
],
[
[
1262,
1274
],
[
2048,
2060
],
[
2370,
2382
]
],
[
[
1401,
1417
],
[
2089,
2105
]
],
[
[
1570,
1590
],
[
2134,
2154
]
],
[
[
1970,
1992
],
[
3580,
3602
]
],
[
[
2256,
2270
],
[
2781,
2795
]
],
[
[
2501,
2526
],
[
3293,
3318
]
],
[
[
3200,
3205
],
[
3682,
3687
]
],
[
[
3497,
3501
],
[
3833,
3837
]
]
] |
# Generated by Django 2.0.6 on 2019-02-20 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dog_account', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='accountNumber',
field=models.IntegerField(),
),
]
| [
[
[
71,
81
],
[
108,
118
],
[
228,
238
]
],
[
[
83,
89
],
[
337,
343
]
],
[
[
98,
107
]
]
] |
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from django.contrib import admin
from dj_experiment.urls import urlpatterns as dj_experiment_urls
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include(dj_experiment_urls, namespace='dj_experiment')),
]
| [
[
[
43,
59
]
],
[
[
61,
76
]
],
[
[
107,
110
],
[
240,
243
],
[
287,
290
]
],
[
[
112,
119
],
[
256,
263
],
[
297,
304
]
],
[
[
147,
152
],
[
264,
269
]
],
[
[
185,
218
],
[
305,
323
]
],
[
[
220,
231
]
]
] |
import numpy as np
from sklearn.base import BaseEstimator
from tensorflow import keras
from .recommenders import KerasRecommender
class ItemPopularity(BaseEstimator):
"""Recommender based solely on interactions per item."""
def fit(self, X=None, y=None):
"""Fit the recommender from the training dataset.
Args:
X (ndarray of shape (n_samples, 2)): An array where each row
consists of a user and an
item.
y (ndarray of shape (n_samples,)): An array where each entry
denotes interactions between
the corresponding user and item.
"""
unique, counts = np.unique(X[y == 1, 1], return_counts=True)
self.interactions_by_item = dict(zip(unique, counts))
def predict(self, X=None):
"""Predict the scores for the provided data.
Args:
X (ndarray of shape (n_samples, 2)): An array where each row
consists of a user and an
item.
Returns:
ndarray of shape (n_samples,): Class labels for each data sample.
"""
y_pred = np.array([self.interactions_by_item[i] for i in X[:, 1]])
return y_pred / max(y_pred)
class GeneralizedMatrixFactorization(KerasRecommender):
"""Recommender implementing the GMF architecture.
Args:
n_factors (int): The number of latent factors.
epochs (int): The number of epochs to train the NN.
optimizer (keras.optimizers.Optimizer): The model's optimizer.
loss (keras.losses.Loss): The loss function.
metrics (List[keras.metrics.Metric, ...]): The metric functions.
seed (int): A random seed.
user_input (keras.Input): An input for the users.
item_input (keras.Input): An input for the items.
user_preprocessing_layers (keras.layers.Layer): Preprocessing layers
for the users.
item_preprocessing_layers (keras.layers.Layer): Preprocessing layers
for the items.
"""
def __init__(self,
n_factors=8,
epochs=10,
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy()],
seed=None,
user_input=None,
item_input=None,
user_preprocessing_layers=None,
item_preprocessing_layers=None):
super().__init__(epochs,
optimizer,
loss,
metrics,
seed,
user_input,
item_input,
user_preprocessing_layers,
item_preprocessing_layers)
self.n_factors = n_factors
self.user_input = user_input
self.item_input = item_input
self.user_preprocessing_layers = user_preprocessing_layers
self.item_preprocessing_layers = item_preprocessing_layers
@staticmethod
def create_core_layers(n_factors,
user_layers,
item_layers,
user_dense_kwdargs={},
item_dense_kwdargs={}):
"""Creates the core layers of the GMF model.
Returns the hidden layers of the model. Specifically, the ones between
the inputs and the visible, output layer.
Args:
n_factors (int): The number of latent factors.
user_layers (keras.layers.Layer): The input or preprocessing layers
for the users.
item_layers (keras.layers.Layer): The input or preprocessing layers
for the items.
user_dense_kwdargs (Dict): The keyword arguments for the
user dense layer.
item_dense_kwdargs (Dict): The keyword arguments for the
item dense layer.
Returns:
keras.layers.Layer: The core layers of the model.
"""
gmf_layers = [
keras.layers.Dense(n_factors, **user_dense_kwdargs)(user_layers),
keras.layers.Dense(n_factors, **item_dense_kwdargs)(item_layers)
]
gmf_layers = keras.layers.Multiply()(gmf_layers)
return gmf_layers
def create_model(self):
"""Creates a new GMF model."""
user_input = (self.user_input
if self.user_input is not None else
keras.Input(shape=(1), name="user", dtype="int64"))
item_input = (self.item_input
if self.item_input is not None else
keras.Input(shape=(1), name="item", dtype="int64"))
user_preprocessing_layers = (
self.user_preprocessing_layers
if self.user_preprocessing_layers is not None
else user_input
)
item_preprocessing_layers = (
self.item_preprocessing_layers
if self.item_preprocessing_layers is not None
else item_input
)
gmf_layers = GeneralizedMatrixFactorization.create_core_layers(
self.n_factors,
user_preprocessing_layers,
item_preprocessing_layers
)
gmf_output = keras.layers.Dense(
1,
activation="sigmoid",
kernel_constraint=keras.constraints.unit_norm()
)(gmf_layers)
return keras.Model(inputs=[user_input, item_input],
outputs=[gmf_output],
name="generalized_matrix_factorization")
def get_core_layers_kwdargs(self):
"""Returns the appropriate kwdargs for pretraining core layers.
Returns:
Tuple[Dict, Dict]: The keyword arguments for the user and item
dense layers.
"""
if not self.model:
raise RuntimeError("GMF is not trained.")
user_kernel, user_bias = self.model.layers[6].get_weights()
item_kernel, item_bias = self.model.layers[7].get_weights()
user_dense_kwdargs = {
"kernel_initializer": keras.initializers.Constant(user_kernel),
"bias_initializer": keras.initializers.Constant(user_bias)
}
item_dense_kwdargs = {
"kernel_initializer": keras.initializers.Constant(item_kernel),
"bias_initializer": keras.initializers.Constant(item_bias)
}
return user_dense_kwdargs, item_dense_kwdargs
def get_output_weights(self):
"""Returns the kernel and bias for the output layer of this model.
Returns:
List[ndarray, Optional[ndarray]]: The kernel and bias.
"""
if not self.model:
raise RuntimeError("GMF is not trained.")
return self.model.layers[-1].get_weights()
class MultiLayerPerceptron(KerasRecommender):
"""Recommender implementing the MLP architecture.
Args:
n_factors (int): The number of latent factors.
n_hidden_layers (int): The number of hidden layers.
epochs (int): The number of epochs to train the NN.
optimizer (keras.optimizers.Optimizer): The model's optimizer.
loss (keras.losses.Loss): The loss function.
metrics (List[keras.metrics.Metric, ...]): The metric functions.
seed (int): A random seed.
user_input (keras.Input): An input for the users.
item_input (keras.Input): An input for the items.
user_preprocessing_layers (keras.layers.Layer): Preprocessing layers
for the users.
item_preprocessing_layers (keras.layers.Layer): Preprocessing layers
for the items.
"""
def __init__(self,
n_factors=8,
n_hidden_layers=4,
epochs=10,
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy()],
seed=None,
user_input=None,
item_input=None,
user_preprocessing_layers=None,
item_preprocessing_layers=None):
super().__init__(epochs,
optimizer,
loss,
metrics,
seed,
user_input,
item_input,
user_preprocessing_layers,
item_preprocessing_layers)
self.n_factors = n_factors
self.n_hidden_layers = n_hidden_layers
self.user_input = user_input
self.item_input = item_input
self.user_preprocessing_layers = user_preprocessing_layers
self.item_preprocessing_layers = item_preprocessing_layers
@staticmethod
def create_core_layers(n_factors,
n_hidden_layers,
user_layers,
item_layers,
hidden_layers_kwdargs=[]):
"""Creates the core layers of the MLP model.
Returns the hidden layers of the model. Specifically, the ones between
the inputs and the visible, output layer.
Args:
n_factors (int): The number of latent factors.
user_layers (keras.layers.Layer): The input or preprocessing layers
for the users.
item_layers (keras.layers.Layer): The input or preprocessing layers
for the items.
hidden_layers_kwdargs (List[Dict, ...]): The keyword
arguments for each
hidden layer.
Returns:
keras.layers.Layer: The core layers of the model.
"""
mlp_layers = keras.layers.Concatenate()([user_layers, item_layers])
for x, i in enumerate(range(n_hidden_layers)[::-1]):
current_kwdargs = {}
if x < len(hidden_layers_kwdargs):
current_kwdargs = hidden_layers_kwdargs[x]
mlp_layers = keras.layers.Dense(n_factors * (2 ** i),
activation="relu",
**current_kwdargs)(mlp_layers)
return mlp_layers
def create_model(self):
"""Creates a new MLP model."""
user_input = (self.user_input
if self.user_input is not None else
keras.Input(shape=(1), name="user", dtype="int64"))
item_input = (self.item_input
if self.item_input is not None else
keras.Input(shape=(1), name="item", dtype="int64"))
user_preprocessing_layers = (
self.user_preprocessing_layers
if self.user_preprocessing_layers is not None
else user_input
)
item_preprocessing_layers = (
self.item_preprocessing_layers
if self.item_preprocessing_layers is not None
else item_input
)
mlp_layers = MultiLayerPerceptron.create_core_layers(
self.n_factors,
self.n_hidden_layers,
user_preprocessing_layers,
item_preprocessing_layers
)
mlp_output = keras.layers.Dense(1,
activation="sigmoid",
use_bias=False)(mlp_layers)
return keras.Model(inputs=[user_input, item_input],
outputs=[mlp_output],
name="multi-layer_perceptron")
def get_core_layers_kwdargs(self):
"""Returns the appropriate kwdargs for pretraining core layers.
Returns:
Dict[String, Object]: The keyword arguments for the hidden layers.
"""
if not self.model:
raise RuntimeError("MLP is not trained.")
hidden_layers_kwdargs = []
for i in range(7, 7 + self.n_hidden_layers):
kernel, bias = self.model.layers[i].get_weights()
hidden_layers_kwdargs.append({
"kernel_initializer": keras.initializers.Constant(kernel),
"bias_initializer": keras.initializers.Constant(bias)
})
return hidden_layers_kwdargs
def get_output_weights(self):
"""Returns the kernel and bias for the output layer of this model.
Returns:
List[ndarray, Optional[ndarray]]: The kernel and bias.
"""
if not self.model:
raise RuntimeError("MLP is not trained.")
return [self.model.layers[-1].get_weights()[0], None]
class NeuralMatrixFactorization(KerasRecommender):
"""Recommender implementing the NeuMF architecture, an ensemble of GMF/MLP.
Args:
gmf_n_factors (int): The number of latent factors for GMF.
mlp_n_factors (int): The number of latent factors for MLP.
mlp_n_hidden_layers (int): The number of hidden layers.
gmf_trained (GeneralizedMatrixFactorization): A trained GMF model of
the same number of
factors.
mlp_trained (MultiLayerPerceptron): A trained MLP model of the same
number of factors and hidden
layers.
alpha (float): The tradeoff between MLP and GMF.
epochs (int): The number of epochs to train the NN.
optimizer (keras.optimizers.Optimizer): The model's optimizer.
loss (keras.losses.Loss): The loss function.
metrics (List[keras.metrics.Metric, ...]): The metric functions.
seed (int): A random seed.
user_input (keras.Input): An input for the users.
item_input (keras.Input): An input for the items.
user_preprocessing_layers (keras.layers.Layer): Preprocessing layers
for the users.
item_preprocessing_layers (keras.layers.Layer): Preprocessing layers
for the items.
"""
def __init__(self,
gmf_n_factors=8,
mlp_n_factors=8,
mlp_n_hidden_layers=4,
gmf_trained=None,
mlp_trained=None,
alpha=0.5,
epochs=10,
optimizer=keras.optimizers.SGD(),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy()],
seed=None,
user_input=None,
item_input=None,
user_preprocessing_layers=None,
item_preprocessing_layers=None):
super().__init__(epochs,
optimizer,
loss,
metrics,
seed,
user_input,
item_input,
user_preprocessing_layers,
item_preprocessing_layers)
self.gmf_n_factors = gmf_n_factors
self.mlp_n_factors = mlp_n_factors
self.mlp_n_hidden_layers = mlp_n_hidden_layers
self.gmf_trained = gmf_trained
self.mlp_trained = mlp_trained
self.alpha = alpha
self.user_input = user_input
self.item_input = item_input
self.user_preprocessing_layers = user_preprocessing_layers
self.item_preprocessing_layers = item_preprocessing_layers
def create_model(self):
"""Creates a new NeuMF model.
Returns:
keras.Model: The NeuMF model. It will be pretrained if trained
models are provided in the constructor.
"""
user_input = (self.user_input
if self.user_input is not None else
keras.Input(shape=(1), name="user", dtype="int64"))
item_input = (self.item_input
if self.item_input is not None else
keras.Input(shape=(1), name="item", dtype="int64"))
user_preprocessing_layers = (
self.user_preprocessing_layers
if self.user_preprocessing_layers is not None
else user_input
)
item_preprocessing_layers = (
self.item_preprocessing_layers
if self.item_preprocessing_layers is not None
else item_input
)
user_dense_kwdargs = {}
item_dense_kwdargs = {}
hidden_layers_kwdargs = []
neumf_output_kernel = "glorot_uniform"
if self.gmf_trained and self.mlp_trained:
if self.gmf_trained.n_factors != self.gmf_n_factors:
raise RuntimeError("GMF factors are not consistent.")
if self.mlp_trained.n_factors != self.mlp_n_factors:
raise RuntimeError("MLP factors are not consistent.")
if self.mlp_trained.n_hidden_layers != self.mlp_n_hidden_layers:
raise RuntimeError("MLP factors are not consistent.")
user_dense_kwdargs, item_dense_kwdargs = (
self.gmf_trained.get_core_layers_kwdargs()
)
hidden_layers_kwdargs = self.mlp_trained.get_core_layers_kwdargs()
gmf_output_kernel, _ = self.gmf_trained.get_output_weights()
mlp_output_kernel, _ = self.mlp_trained.get_output_weights()
neumf_output_kernel = keras.initializers.Constant(
np.concatenate((gmf_output_kernel * self.alpha,
mlp_output_kernel * (1 - self.alpha)))
)
gmf_layers = GeneralizedMatrixFactorization.create_core_layers(
self.gmf_n_factors,
user_preprocessing_layers,
item_preprocessing_layers,
user_dense_kwdargs,
item_dense_kwdargs
)
mlp_layers = MultiLayerPerceptron.create_core_layers(
self.mlp_n_factors,
self.mlp_n_hidden_layers,
user_preprocessing_layers,
item_preprocessing_layers,
hidden_layers_kwdargs
)
neumf_layers = [gmf_layers, mlp_layers]
neumf_layers = keras.layers.Concatenate()(neumf_layers)
neumf_layers = (
keras.layers.Dense(1,
activation="sigmoid",
kernel_initializer=neumf_output_kernel,
kernel_constraint=keras.constraints.unit_norm(),
use_bias=False)(neumf_layers)
)
return keras.Model(inputs=[user_input, item_input],
outputs=[neumf_layers],
name="neural_matrix_factorization")
| [
[
[
7,
18
],
[
808,
810
],
[
1342,
1344
],
[
18217,
18219
]
],
[
[
44,
57
],
[
153,
166
]
],
[
[
81,
86
],
[
2434,
2439
],
[
2481,
2486
],
[
2542,
2547
],
[
8380,
8385
],
[
8427,
8432
],
[
8488,
8493
],
[
15114,
15119
],
[
15160,
15165
],
[
15221,
15226
],
[
4525,
4530
],
[
4603,
4608
],
[
4699,
4704
],
[
4948,
4953
],
[
5118,
5123
],
[
5735,
5740
],
[
5834,
5839
],
[
5902,
5907
],
[
6590,
6595
],
[
6664,
6669
],
[
6778,
6783
],
[
6852,
6857
],
[
10453,
10458
],
[
10736,
10741
],
[
11129,
11134
],
[
11299,
11304
],
[
11940,
11945
],
[
12108,
12113
],
[
12794,
12799
],
[
12867,
12872
],
[
16589,
16594
],
[
16759,
16764
],
[
18172,
18177
],
[
18933,
18938
],
[
19011,
19016
],
[
19206,
19211
],
[
19324,
19329
]
],
[
[
113,
129
],
[
1475,
1491
],
[
7325,
7341
],
[
13339,
13355
]
],
[
[
138,
152
]
],
[
[
1444,
1474
],
[
5547,
5577
],
[
18372,
18402
]
],
[
[
7304,
7324
],
[
11728,
11748
],
[
18628,
18648
]
],
[
[
13313,
13338
]
]
] |
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox, Chrome, PhantomJS
from selenium import webdriver
from argparse import ArgumentParser
from urllib.parse import quote
import time
import copy
import sys
import os
TIMEOUT = 20
TIMESLP = 3
def login(driver, username, password, failed=0):
if failed == 3:
raise Exception('门户登录失败')
iaaaUrl = 'https://iaaa.pku.edu.cn/iaaa/oauth.jsp'
appName = quote('北京大学校内信息门户新版')
redirectUrl = 'https://portal.pku.edu.cn/portal2017/ssoLogin.do'
driver.get('https://portal.pku.edu.cn/portal2017/')
driver.get(
f'{iaaaUrl}?appID=portal2017&appName={appName}&redirectUrl={redirectUrl}'
)
print('门户登陆中...')
driver.find_element_by_id('user_name').send_keys(username)
time.sleep(TIMESLP)
driver.find_element_by_id('password').send_keys(password)
time.sleep(TIMESLP)
driver.find_element_by_id('logon_button').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.LINK_TEXT, '我知道了')))
except:
pass
else:
driver.find_element_by_link_text('我知道了').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
except:
login(driver, username, password, failed + 1)
else:
print('门户登录成功!')
def go_to_application_out(driver):
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-input__inner')))
def go_to_application_in(driver):
driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-select')))
def select_in_out(driver, way):
driver.find_element_by_class_name('el-select').click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{way}"]').click()
def select_campus(driver, campus):
driver.find_elements_by_class_name('el-select')[1].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{campus}"]').click()
def select_destination(driver, destination):
driver.find_elements_by_class_name('el-select')[2].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{destination}"]').click()
def select_district(driver, district):
driver.find_elements_by_class_name('el-select')[3].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{district}"]').click()
def write_reason(driver, reason):
driver.find_element_by_class_name('el-textarea__inner').send_keys(
f'{reason}')
time.sleep(TIMESLP)
def write_track(driver, track):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{track}')
time.sleep(TIMESLP)
def write_street(driver, street):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{street}')
time.sleep(TIMESLP)
def click_check(driver):
driver.find_element_by_class_name('el-checkbox__label').click()
time.sleep(TIMESLP)
def click_inPeking(driver):
driver.find_element_by_class_name('el-radio__inner').click()
time.sleep(TIMESLP)
def submit(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"保存")]').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located(
(By.XPATH, '(//button/span[contains(text(),"提交")])[3]')))
driver.find_element_by_xpath(
'(//button/span[contains(text(),"提交")])[3]').click()
time.sleep(TIMESLP)
def fill_out(driver, campus, reason, destination, track):
print('开始填报出校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '出校')
print('Done')
print('选择校区 ', end='')
select_campus(driver, campus)
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
print('选择出校目的地 ', end='')
select_destination(driver, destination)
print('Done')
print('填写出校行动轨迹 ', end='')
write_track(driver, track)
print('Done')
click_check(driver)
submit(driver)
print('出校备案填报完毕!')
def fill_in(driver, campus, reason, habitation, district, street):
print('开始填报入校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '入校')
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
if habitation != '北京':
raise Exception('暂不支持京外入校备案,请手动填写')
print('选择居住地所在区 ', end='')
select_district(driver, district)
print('Done')
print('填写居住地所在街道 ', end='')
write_street(driver, street)
print('Done')
click_inPeking(driver)
click_check(driver)
submit(driver)
print('入校备案填报完毕!')
def run(driver, username, password, campus, reason_in, reason_out, destination, track,
habitation, district, street):
login(driver, username, password)
print('=================================')
go_to_application_out(driver)
fill_out(driver, campus, reason_out, destination, track)
print('=================================')
go_to_application_in(driver)
fill_in(driver, campus, reason_in, habitation, district, street)
print('=================================')
print('可以愉快的玩耍啦!')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--username', '-u', type=str, help='用户名')
parser.add_argument('--password', '-p', type=str, help='密码')
parser.add_argument('--campus', type=str, help='所在校区, 燕园、万柳、畅春园、圆明园、中关新园', default='燕园')
parser.add_argument('--reason_out', type=str, help='出校原因, eg. 吃饭', default='回家')
parser.add_argument('--reason_in', type=str, help='出校原因, eg. 吃饭', default='学习')
parser.add_argument('--destination', type=str, help='出校目的地, eg. 北京', default='北京')
parser.add_argument('--track', type=str, help='出校轨迹, eg. 畅春园食堂', default='燕园大厦东门-上地街道')
parser.add_argument('--habitation', type=str, help='入校前居住地, eg. 北京', default='北京')
parser.add_argument('--district', type=str, help='入校前居住所在区, eg. 海淀区', default='海淀区')
parser.add_argument('--street', type=str, help='入校前居住所在街道, eg. 燕园街道', default='上地街道')
args = parser.parse_args()
args_public = copy.deepcopy(args)
args_public.password = 'xxxxxxxx'
print('Arguments: {}'.format(args_public))
print('Driver Launching...')
# driver = Firefox()
# driver = Chrome()
if sys.platform == 'darwin': # macOS
phantomjs_path = os.path.join('phantomjs', 'phantomjs-darwin')
elif sys.platform == 'linux': # linux
phantomjs_path = os.path.join('phantomjs', 'phantomjs-linux-x86_64')
else: # windows
phantomjs_path = os.path.join('phantomjs', 'phantomjs-windows.exe')
driver = PhantomJS(executable_path=phantomjs_path)
run(driver, args.username, args.password, args.campus, args.reason_in, args.reason_out,
args.destination, args.track, args.habitation, args.district,
args.street)
driver.close()
| [
[
[
39,
64
],
[
1233,
1235
],
[
1451,
1453
],
[
1734,
1736
],
[
1996,
1998
],
[
2225,
2227
],
[
2448,
2450
],
[
2593,
2595
],
[
2855,
2857
],
[
3084,
3086
],
[
4812,
4814
]
],
[
[
107,
120
],
[
1183,
1196
],
[
1401,
1414
],
[
1688,
1701
],
[
1950,
1963
],
[
2179,
2192
],
[
2402,
2415
],
[
2547,
2560
],
[
2809,
2822
],
[
3038,
3051
],
[
4766,
4779
]
],
[
[
163,
169
]
],
[
[
213,
217
]
],
[
[
259,
261
],
[
1267,
1269
],
[
1485,
1487
],
[
1768,
1770
],
[
2030,
2032
],
[
2259,
2261
],
[
2482,
2484
],
[
2627,
2629
],
[
2889,
2891
],
[
3118,
3120
],
[
4859,
4861
]
],
[
[
293,
300
]
],
[
[
302,
308
]
],
[
[
310,
319
],
[
8232,
8241
]
],
[
[
341,
350
]
],
[
[
372,
386
],
[
6794,
6808
]
],
[
[
412,
417
],
[
664,
669
]
],
[
[
425,
429
],
[
1005,
1009
],
[
1091,
1095
],
[
1871,
1875
],
[
2068,
2072
],
[
2155,
2159
],
[
2730,
2734
],
[
2927,
2931
],
[
3014,
3018
],
[
3245,
3249
],
[
3440,
3444
],
[
3648,
3652
],
[
3855,
3859
],
[
4083,
4087
],
[
4236,
4240
],
[
4392,
4396
],
[
4511,
4515
],
[
4630,
4634
],
[
5015,
5019
]
],
[
[
437,
441
],
[
7699,
7703
]
],
[
[
449,
452
],
[
7895,
7898
],
[
8010,
8013
]
],
[
[
460,
462
],
[
7955,
7957
],
[
8069,
8071
],
[
8167,
8169
]
],
[
[
464,
471
],
[
1205,
1212
],
[
1423,
1430
],
[
1710,
1717
],
[
1972,
1979
],
[
2201,
2208
],
[
2424,
2431
],
[
2569,
2576
],
[
2831,
2838
],
[
3060,
3067
],
[
4788,
4795
]
],
[
[
477,
484
],
[
1016,
1023
],
[
1102,
1109
],
[
1882,
1889
],
[
2079,
2086
],
[
2166,
2173
],
[
2741,
2748
],
[
2938,
2945
],
[
3025,
3032
],
[
3256,
3263
],
[
3451,
3458
],
[
3659,
3666
],
[
3866,
3873
],
[
4094,
4101
],
[
4247,
4254
],
[
4403,
4410
],
[
4522,
4529
],
[
4641,
4648
],
[
5026,
5033
]
],
[
[
495,
500
],
[
1521,
1526
],
[
6354,
6359
]
],
[
[
1608,
1629
],
[
6440,
6461
]
],
[
[
2301,
2321
],
[
6583,
6603
]
],
[
[
3154,
3167
],
[
5155,
5168
],
[
5746,
5759
]
],
[
[
3342,
3355
],
[
5236,
5249
]
],
[
[
3540,
3558
],
[
5407,
5425
]
],
[
[
3753,
3768
],
[
5988,
6003
]
],
[
[
3957,
3969
],
[
5322,
5334
],
[
5830,
5842
]
],
[
[
4109,
4120
],
[
5504,
5515
]
],
[
[
4262,
4274
],
[
6080,
6092
]
],
[
[
4418,
4429
],
[
5554,
5565
],
[
6159,
6170
]
],
[
[
4537,
4551
],
[
6132,
6146
]
],
[
[
4656,
4662
],
[
5578,
5584
],
[
6183,
6189
]
],
[
[
5041,
5049
],
[
6474,
6482
]
],
[
[
5623,
5630
],
[
6616,
6623
]
],
[
[
6228,
6231
],
[
8279,
8282
]
],
[
[
6785,
6791
],
[
6815,
6821
],
[
6881,
6887
],
[
6946,
6952
],
[
7039,
7045
],
[
7124,
7130
],
[
7208,
7214
],
[
7295,
7301
],
[
7387,
7393
],
[
7474,
7480
],
[
7563,
7569
],
[
7660,
7666
]
],
[
[
7653,
7657
],
[
7713,
7717
],
[
8291,
8295
],
[
8306,
8310
],
[
8321,
8325
],
[
8334,
8338
],
[
8350,
8354
],
[
8375,
8379
],
[
8393,
8397
],
[
8405,
8409
],
[
8422,
8426
],
[
8445,
8449
]
],
[
[
7685,
7696
],
[
7723,
7734
],
[
7790,
7801
]
],
[
[
7938,
7952
],
[
8258,
8272
]
],
[
[
8052,
8066
],
[
8258,
8272
]
],
[
[
8150,
8164
],
[
8258,
8272
]
],
[
[
8223,
8229
],
[
8283,
8289
],
[
8463,
8469
]
]
] |
import os
import pytest
import sys
import random
import tempfile
import time
import requests
from pathlib import Path
import ray
from ray.exceptions import RuntimeEnvSetupError
from ray._private.test_utils import (
run_string_as_driver, run_string_as_driver_nonblocking, wait_for_condition)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
def start_client_server(cluster, client_mode):
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related with runtime env. It following these steps
1) Creating a temporary dir with fixture working_dir
2) Using a template named driver_script defined globally
3) Overwrite runtime_env and execute_statement in the template
4) Execute it as a separate driver and return the result
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env iwth py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env iwth working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Test it works before
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Test it works before
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
t = out.strip().split("\n")[-1]
assert out.strip().split("\n")[-1] == \
"FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
# It's a detached actors, so it should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
# To make the first one hanging there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
# waiting it to be up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the second one which should work because Ray Client servers.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# To make the first one hanging there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
# Execute the following in the second one which should
# succeed
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# To make the first one hanging ther
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
ray.init(runtime_env={"working_dir": "."})
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "58a73821fbfefbf53a19b6c7ffd71e70ccf258c7"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {"1.6.0": "5052fe67d99f1d4bfc81b2a8694dbf2aa807bbdc"}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
# job_config_serialized is JobConfig protobuf serialized string,
# job_config.runtime_env.raw_json has container_option info
# job_config.serialized_runtime_env also has container_option info
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_invalid_conda_env(shutdown_only):
ray.init()
@ray.remote
def f():
pass
start = time.time()
bad_env = {"conda": {"dependencies": ["this_doesnt_exist"]}}
with pytest.raises(RuntimeEnvSetupError):
ray.get(f.options(runtime_env=bad_env).remote())
first_time = time.time() - start
# Check that another valid task can run.
ray.get(f.remote())
# The second time this runs it should be faster as the error is cached.
start = time.time()
with pytest.raises(RuntimeEnvSetupError):
ray.get(f.options(runtime_env=bad_env).remote())
assert (time.time() - start) < (first_time / 2.0)
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
@pytest.mark.parametrize(
"ray_start_cluster", [{
"_system_config": {
"event_stats_print_interval_ms": 100,
"debug_dump_period_milliseconds": 100,
"event_stats": True
}
}],
indirect=True)
def test_no_spurious_worker_startup(ray_start_cluster):
"""Test that no extra workers start up during a long env installation."""
cluster = ray_start_cluster
# This hook sleeps for 15 seconds to simulate creating a runtime env.
cluster.add_node(
num_cpus=1,
runtime_env_setup_hook=(
"ray._private.test_utils.sleep_setup_runtime_env"))
# Set a nonempty runtime env so that the runtime env setup hook is called.
runtime_env = {"env_vars": {"a": "b"}}
ray.init(address=cluster.address)
@ray.remote
class Counter(object):
def __init__(self):
self.value = 0
def get(self):
return self.value
# Instantiate an actor that requires the long runtime env installation.
a = Counter.options(runtime_env=runtime_env).remote()
assert ray.get(a.get.remote()) == 0
# Check "debug_state.txt" to ensure no extra workers were started.
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
debug_state_path = session_path / "debug_state.txt"
def get_num_workers():
with open(debug_state_path) as f:
for line in f.readlines():
num_workers_prefix = "- num PYTHON workers: "
if num_workers_prefix in line:
return int(line[len(num_workers_prefix):])
return None
# Wait for "debug_state.txt" to be updated to reflect the started worker.
start = time.time()
wait_for_condition(lambda: get_num_workers() > 0)
time_waited = time.time() - start
print(f"Waited {time_waited} for debug_state.txt to be updated")
# If any workers were unnecessarily started during the initial env
# installation, they will bypass the runtime env setup hook because the
# created env will have been cached and should be added to num_workers
# within a few seconds. Adjusting the default update period for
# debut_state.txt via this cluster_utils pytest fixture seems to be broken,
# so just check it for the next 10 seconds (the default period).
for i in range(100):
# Check that no more workers were started.
assert get_num_workers() <= 1
time.sleep(0.1)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
| [
[
[
7,
9
],
[
2426,
2428
],
[
2446,
2448
],
[
2494,
2496
],
[
10297,
10299
],
[
23152,
23154
],
[
23172,
23174
],
[
23653,
23655
],
[
23852,
23854
],
[
23872,
23874
],
[
24240,
24242
],
[
25834,
25836
],
[
26156,
26158
],
[
26581,
26583
],
[
26743,
26745
],
[
26921,
26923
]
],
[
[
17,
23
],
[
1898,
1904
],
[
2875,
2881
],
[
5517,
5523
],
[
5597,
5603
],
[
6258,
6264
],
[
6338,
6344
],
[
7771,
7777
],
[
7851,
7857
],
[
8552,
8558
],
[
8632,
8638
],
[
9335,
9341
],
[
9415,
9421
],
[
10075,
10081
],
[
10155,
10161
],
[
11001,
11007
],
[
11081,
11087
],
[
13271,
13277
],
[
13351,
13357
],
[
15872,
15878
],
[
15952,
15958
],
[
16551,
16557
],
[
16631,
16637
],
[
17799,
17805
],
[
17879,
17885
],
[
18625,
18631
],
[
18705,
18711
],
[
19930,
19936
],
[
20920,
20926
],
[
21809,
21815
],
[
22783,
22789
],
[
23674,
23680
],
[
25604,
25610
],
[
25900,
25906
],
[
26243,
26249
],
[
28558,
28564
],
[
29316,
29322
],
[
29412,
29418
],
[
31975,
31981
],
[
28058,
28064
],
[
28166,
28172
],
[
28350,
28356
],
[
28458,
28464
],
[
28853,
28859
],
[
29164,
29170
]
],
[
[
31,
34
],
[
2894,
2897
],
[
5536,
5539
],
[
6277,
6280
],
[
7790,
7793
],
[
8571,
8574
],
[
9354,
9357
],
[
10094,
10097
],
[
11020,
11023
],
[
13290,
13293
],
[
15891,
15894
],
[
16570,
16573
],
[
17818,
17821
],
[
18644,
18647
],
[
19949,
19952
],
[
20939,
20942
],
[
21828,
21831
],
[
22802,
22805
],
[
23693,
23696
],
[
25628,
25631
],
[
25924,
25927
],
[
26267,
26270
],
[
28582,
28585
],
[
29340,
29343
]
],
[
[
42,
48
],
[
3491,
3497
],
[
3536,
3542
],
[
3716,
3722
],
[
4219,
4225
],
[
4343,
4349
]
],
[
[
56,
64
],
[
1959,
1967
],
[
2998,
3006
],
[
5867,
5875
],
[
22963,
22971
],
[
23792,
23800
]
],
[
[
72,
76
],
[
28767,
28771
],
[
28964,
28968
],
[
29143,
29147
],
[
29271,
29275
],
[
31166,
31170
],
[
31250,
31254
],
[
31902,
31906
]
],
[
[
84,
92
],
[
24681,
24689
],
[
25095,
25103
],
[
25558,
25566
]
],
[
[
113,
117
],
[
2016,
2020
],
[
3151,
3155
],
[
8466,
8470
],
[
9249,
9253
],
[
10041,
10045
],
[
10915,
10919
],
[
11333,
11337
],
[
12846,
12850
],
[
12935,
12939
],
[
13023,
13027
],
[
13605,
13609
],
[
17631,
17635
],
[
18539,
18543
],
[
19497,
19501
],
[
19554,
19558
],
[
19844,
19848
],
[
23019,
23023
],
[
30700,
30704
]
],
[
[
126,
129
],
[
2684,
2687
],
[
2757,
2760
],
[
4662,
4665
],
[
5024,
5027
],
[
19665,
19668
],
[
19708,
19711
],
[
19763,
19766
],
[
23263,
23266
],
[
23965,
23968
],
[
24018,
24021
],
[
24196,
24199
],
[
25753,
25756
],
[
25868,
25871
],
[
26050,
26053
],
[
26209,
26212
],
[
26385,
26388
],
[
26414,
26417
],
[
26531,
26534
],
[
26615,
26618
],
[
26661,
26664
],
[
26777,
26780
],
[
26813,
26816
],
[
26974,
26977
],
[
27092,
27095
],
[
27314,
27317
],
[
27541,
27544
],
[
27960,
27963
],
[
27977,
27980
],
[
28119,
28122
],
[
28269,
28272
],
[
28411,
28414
],
[
28700,
28703
],
[
28717,
28720
],
[
28898,
28901
],
[
29034,
29037
],
[
29209,
29212
],
[
30171,
30174
],
[
30211,
30214
],
[
30504,
30507
],
[
30623,
30626
]
],
[
[
157,
177
],
[
28867,
28887
],
[
29178,
29198
]
],
[
[
220,
240
],
[
6191,
6211
],
[
6799,
6819
],
[
7069,
7089
],
[
7375,
7395
],
[
7683,
7703
],
[
8367,
8387
],
[
9150,
9170
],
[
9942,
9962
],
[
10816,
10836
],
[
12494,
12514
],
[
13130,
13150
],
[
15312,
15332
],
[
15676,
15696
],
[
16464,
16484
],
[
17532,
17552
],
[
18440,
18460
],
[
19288,
19308
],
[
20806,
20826
],
[
21692,
21712
],
[
22671,
22691
],
[
23592,
23612
]
],
[
[
242,
274
],
[
20453,
20485
],
[
21443,
21475
],
[
22341,
22373
]
],
[
[
276,
294
],
[
31182,
31200
]
],
[
[
328,
346
],
[
24445,
24463
]
],
[
[
348,
368
],
[
24964,
24984
]
],
[
[
402,
423
],
[
25426,
25447
]
],
[
[
432,
466
],
[
8512,
8514
],
[
9295,
9297
],
[
10961,
10963
],
[
17714,
17716
],
[
17756,
17758
],
[
18585,
18587
],
[
19440,
19442
],
[
19890,
19892
]
],
[
[
484,
489
],
[
20529,
20534
],
[
21493,
21498
],
[
22391,
22396
]
],
[
[
490,
503
],
[
6144,
6157
],
[
6756,
6769
],
[
7026,
7039
],
[
7332,
7345
],
[
7640,
7653
],
[
8324,
8337
],
[
9107,
9120
],
[
9899,
9912
],
[
10773,
10786
],
[
12451,
12464
],
[
13087,
13100
],
[
15269,
15282
],
[
15633,
15646
],
[
16421,
16434
],
[
17489,
17502
],
[
18397,
18410
],
[
19245,
19258
],
[
20363,
20376
],
[
20763,
20776
],
[
21399,
21412
],
[
21649,
21662
],
[
22297,
22310
],
[
22628,
22641
]
],
[
[
1773,
1784
],
[
11356,
11367
],
[
11409,
11420
],
[
11462,
11473
],
[
11515,
11526
],
[
11580,
11591
],
[
11645,
11656
],
[
11685,
11696
],
[
11725,
11736
]
],
[
[
1935,
1946
]
],
[
[
2518,
2537
],
[
5783,
5802
],
[
6539,
6558
],
[
8044,
8063
],
[
8813,
8832
],
[
9603,
9622
],
[
10433,
10452
],
[
11272,
11291
],
[
13544,
13563
],
[
16152,
16171
],
[
16816,
16835
],
[
18075,
18094
],
[
18902,
18921
],
[
20207,
20226
],
[
21197,
21216
],
[
22096,
22115
],
[
23331,
23350
]
],
[
[
2958,
2969
]
],
[
[
5655,
5677
]
],
[
[
6396,
6420
]
],
[
[
7909,
7925
]
],
[
[
8690,
8703
]
],
[
[
9473,
9493
]
],
[
[
10213,
10237
]
],
[
[
11139,
11153
]
],
[
[
13409,
13425
]
],
[
[
16010,
16033
]
],
[
[
16689,
16706
]
],
[
[
17937,
17956
]
],
[
[
18763,
18783
]
],
[
[
20013,
20040
]
],
[
[
21003,
21030
]
],
[
[
21892,
21919
]
],
[
[
22866,
22894
]
],
[
[
23757,
23766
]
],
[
[
24264,
24287
]
],
[
[
24725,
24750
]
],
[
[
25144,
25170
]
],
[
[
25703,
25722
]
],
[
[
25999,
26019
]
],
[
[
26342,
26364
]
],
[
[
27352,
27383
]
],
[
[
27906,
27939
]
],
[
[
28657,
28679
]
],
[
[
29667,
29698
]
],
[
[
31958,
31961
],
[
31966,
31969
]
]
] |
import MySQLdb as sql
from config import sqlconfig
class users:
def __init__():
self.connection = sql.connect(
host=sqlconfig.host, user=sqlconfig.user, passwd=sqlconfig.passwd
)
self.cursor = self.connection.cursor()
def __del__():
self.cursor.close()
self.connection.close()
def new_user(name, email):
self.cursor.execute(
"SELECT ID FROM dutchman.Users WHERE Email=%s",
[email]
)
existing_user = self.cursor.fetchall()
if len(existing_user) is not 0:
print("Existing User")
return
self.cursor.execute(
"INSERT INTO dutchman.Users(Name,Email) VALUES(%s,%s)",
(name, email)
)
self.connection.commit()
self.cursor.execute(
"SELECT ID FROM dutchman.Users WHERE Email=%s",
[email]
)
ID = self.cursor.fetchall()
ID = ID[0][0]
command = "CREATE TABLE " + email
query = ("CREATE DATABASE " + email,)
query.append(
command + ".inbox (ID INT NOT NULL AUTO_INCREMENT, \
message VARCHAR(2000) NOT NULL, recipient VARCHAR(200), \
read INT NOT NULL)"
)
query.append(
command + ".outbox (ID INT NOT NULL AUTO_INCREMENT, \
message VARCHAR(2000) NOT NULL, \
sender VARCHAR(200), \
read INT NOT NULL))"
)
query.append(
command + ".queries (ID INT NOT NULL AUTO_INCREMENT, \
message VARCHAR(2000) NOT NULL, \
response VARCHAR(2000))"
)
query.append(
command + ".friends (SR INT NOT NULL AUTO_INCREMENT, \
ID INT NOT NULL)"
)
self.cursor.executemany(query)
self.cursor.close()
self.connection.close()
def add_friend(user, friend):
self.cursor.execute(
"INSERT INTO " + user + ".friend (ID) VALUES(%s)", [friend]
)
self.connection.commit()
| [
[
[
7,
21
],
[
113,
116
]
],
[
[
41,
50
],
[
143,
152
],
[
164,
173
],
[
187,
196
]
],
[
[
59,
64
]
]
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
helper classes and functions
'''
import os, sys, string, hashlib
import re, textwrap
from unicodedata import normalize
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class DummyStream:
''' dummyStream behaves like a stream but does nothing. '''
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
def getAppPath():
'''Get the path to this script no matter how it's run.'''
#Determine if the application is a py/pyw or a frozen exe.
if hasattr(sys, 'frozen'):
# If run from exe
#dir_path = os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
dir_path = os.path.dirname(sys.executable)
elif '__file__' in locals():
# If run from py
dir_path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
else:
# If run from command line
#dir_path = sys.path[0]
dir_path = os.getcwdu()
return dir_path
def getHomeDir():
if sys.platform == 'win32':
import winpaths
homedir = winpaths.get_common_appdata() # = e.g # = e.g 'C:\ProgramData'
else:
homedir = os.path.expanduser("~")
return homedir
def makeDir(d):
if not os.path.exists(d):
os.makedirs(d)
return d
def ensureDir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
return f
def _xorData(data):
"""Xor Method, Take a data Xor all bytes and return"""
data = [chr(ord(c) ^ 10) for c in data]
return string.join(data, '')
def readFile(path, offset=0, size=-1, xor_data=False):
"""Read specified block from file, using the given size and offset"""
fd = open(path, 'rb')
fd.seek(offset)
data = fd.read(size)
fd.close()
return _xorData(data) if xor_data else data
def writeFile(path, buf, offset=0, xor_data=False):
"""Write specified block on file at the given offset"""
if xor_data:
buf = _xorData(buf)
fd = open(path, 'wb')
fd.seek(offset)
fd.write(buf)
fd.close()
return len(buf)
def md5_for_file(f, block_size=2**20):
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def smart_strip(s, max_length=0):
s = s.strip()
if max_length == 0 or len(s) <= max_length:
return s
if max_length > 3:
return s[:-(len(s) - max_length + 3)].strip() + '...'
else:
return s[:-(len(s) - max_length)].strip()
def strip_by_word(the_string, width):
if width <= 0:
return the_string.strip()
s = the_string
if len(the_string) > width:
s = textwrap.wrap(s, width)[0]
if s[-1:] in [u'.', u',', u'?', u'!', u';', u'-', u':']:
s = s[:-1].strip()
if len(s) < len(the_string):
s += '...'
return s | [
[
[
87,
89
],
[
806,
808
],
[
915,
917
],
[
1075,
1077
],
[
1297,
1299
],
[
1372,
1374
],
[
1399,
1401
],
[
1454,
1456
],
[
1484,
1486
],
[
1511,
1513
]
],
[
[
91,
94
],
[
655,
658
],
[
822,
825
],
[
949,
952
],
[
1139,
1142
]
],
[
[
96,
102
],
[
1674,
1680
]
],
[
[
104,
111
],
[
2272,
2279
]
],
[
[
119,
121
]
],
[
[
123,
131
],
[
2857,
2865
]
],
[
[
156,
165
]
],
[
[
198,
206
]
],
[
[
252,
260
]
],
[
[
273,
284
]
],
[
[
501,
511
]
],
[
[
1118,
1128
]
],
[
[
1349,
1356
]
],
[
[
1432,
1441
]
],
[
[
1544,
1552
],
[
1923,
1931
],
[
2108,
2116
]
],
[
[
1701,
1709
]
],
[
[
1969,
1978
]
],
[
[
2227,
2239
]
],
[
[
2433,
2444
]
],
[
[
2702,
2715
]
]
] |
import cv2
import numpy as np
#load colored image
img_1 = cv2.imread("Images\\sunflower.png", 1)
#load grayscale image
img_2 = cv2.imread("Images\\sunflower.png", 0)
#resizing images
resized_img_1 = cv2.resize(img_1, (int(img_1.shape[1]/2), int(img_1.shape[0]/2)))
#printing images' shape(dimension)
print(img_1.shape)
print(img_2.shape)
#displaying the loaded images
cv2.imshow("Colored Image", img_1)
cv2.imshow("Grayscale Image", img_2)
cv2.imshow("Resized Image", resized_img_1)
cv2.waitKey(0)
#cv2.waitKey(2000)
cv2.destroyAllWindows()
| [
[
[
7,
10
],
[
63,
66
],
[
136,
139
],
[
212,
215
],
[
390,
393
],
[
426,
429
],
[
464,
467
],
[
510,
513
],
[
546,
549
]
],
[
[
19,
30
]
],
[
[
55,
60
],
[
223,
228
],
[
235,
240
],
[
258,
263
],
[
323,
328
],
[
418,
423
]
],
[
[
128,
133
],
[
343,
348
],
[
456,
461
]
],
[
[
196,
209
],
[
492,
505
]
]
] |
import glob
import json
from docutils import nodes
from sphinx.util.docutils import SphinxDirective
from sphinx.util import logging
def add_prop_attr_row(prop, attr, tbody, key = None):
desc_row = nodes.row()
tbody += desc_row
desc_row += nodes.entry()
desc_key_entry = nodes.entry()
desc_row += desc_key_entry
desc_key_entry += nodes.strong(text = key if key else attr)
desc_text_entry = nodes.entry()
desc_row += desc_text_entry
desc_text_entry += nodes.paragraph(text = prop[attr])
def add_table_row(prop, tbody):
# Create title / header row for this schema property.
title_row = nodes.row()
tbody += title_row
title_entry = nodes.entry(morecols = 2)
title_row += title_entry
title_entry += nodes.strong(text = prop['title'])
# Add the property description, if available.
if 'description' in prop:
add_prop_attr_row(prop, 'description', tbody)
# Add the property type, if available.
if 'type' in prop:
add_prop_attr_row(prop, 'type', tbody)
if prop['type'] == 'array':
# Process 'items' keyword with constraints on item types.
if 'type' in prop['items']:
add_prop_attr_row(prop['items'], 'type', tbody, key = 'entry type')
if 'pattern' in prop['items']:
add_prop_attr_row(prop['items'], 'pattern', tbody, key = 'entry regex match')
if 'minLength' in prop['items']:
add_prop_attr_row(prop['items'], 'minLength', tbody, key = 'minimum length for each entry')
# Add type-specific parameters: 'minLength/pattern' for strings,
if 'pattern' in prop:
add_prop_attr_row(prop, 'pattern', tbody, key = 'regex match')
if 'minLength' in prop:
add_prop_attr_row(prop, 'minLength', tbody, key = 'minimum string length')
# Add default value, if available.
if 'default' in prop:
add_prop_attr_row(prop, 'type', tbody, key = 'default value')
# Main Sphinx plugin
class RemoteAPIGen(SphinxDirective):
def run(self):
logger = logging.getLogger(__name__)
# List of documentation objects to return.
new_doc = []
# Get the JSONSchema API files to include.
api_schemas = glob.iglob('server_schema/*.json')
# Create a top-level section node to contain the individual API tables.
top_section = nodes.section(ids = [nodes.make_id('server_top')])
# Process each API individually, adding its doc components to the
# array which we will return.
for schema_file in api_schemas:
with open(schema_file, 'r') as sf:
api_schema = json.loads(sf.read())
# Create a title heading for the section.
title_str = api_schema['title'].strip('/')
section = nodes.section(ids = [nodes.make_id(title_str)])
section += nodes.title(text = title_str)
# Add a table describing the schema.
relevant_keys = ['description', 'type', 'minLength', 'pattern', 'items', 'default']
table = nodes.table()
tgroup = nodes.tgroup(cols = 3)
tgroup += nodes.colspec(colwidth=10)
tgroup += nodes.colspec(colwidth=50)
tgroup += nodes.colspec(colwidth=100)
table += tgroup
tbody = nodes.tbody()
tgroup += tbody
for prop in api_schema['properties']:
add_table_row(api_schema['properties'][prop], tbody)
section += table
# Add the endpoint's description.
section += nodes.paragraph(text = api_schema['description'])
top_section += section
# Done; return the array of document objects.
new_doc += top_section
return new_doc
def setup(app):
app.add_directive('clientservergen', RemoteAPIGen)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| [
[
[
7,
11
],
[
2242,
2246
]
],
[
[
19,
23
],
[
2660,
2664
]
],
[
[
46,
51
],
[
204,
209
],
[
254,
259
],
[
289,
294
],
[
356,
361
],
[
420,
425
],
[
489,
494
],
[
631,
636
],
[
684,
689
],
[
758,
763
],
[
2380,
2385
],
[
2401,
2406
],
[
2814,
2819
],
[
2835,
2840
],
[
2885,
2890
],
[
3080,
3085
],
[
3115,
3120
],
[
3160,
3165
],
[
3209,
3214
],
[
3258,
3263
],
[
3334,
3339
],
[
3593,
3598
]
],
[
[
85,
100
],
[
2013,
2028
]
],
[
[
126,
133
],
[
2067,
2074
]
],
[
[
139,
156
],
[
881,
898
],
[
1001,
1018
],
[
1202,
1219
],
[
1329,
1346
],
[
1468,
1485
],
[
1663,
1680
],
[
1762,
1779
],
[
1910,
1927
]
],
[
[
529,
542
],
[
3442,
3455
]
],
[
[
2000,
2012
],
[
3845,
3857
]
],
[
[
3792,
3797
]
]
] |
# Works on Linux .sym files generated using the nm command
# Like this:
# nm -CSr --size-sort StereoKitC.sym > size.txt
import re
data = {}
file1 = open('size.txt', 'r')
while True:
line = file1.readline()
# if line is empty end of file is reached
if not line:
break
matches = re.search("(\w*) (\w*) (\w*) ([^_:<(]*)(?:[_:<(]*)([^_:<(]*)", line)
size = int(matches[2], 16)
namespace = matches[4]
if ' ' in namespace:
namespace = namespace.split(' ')[1]
namespace = namespace.replace('\n', '')
if namespace not in data:
data[namespace] = size
else:
data[namespace] += size
file1.close()
data = dict(sorted(data.items(), key=lambda item: item[1]))
for key in data:
size = data[key]
if size < 1024: continue
size = round( size / 1024 )
print(key + " " + str(size) + "kb")
| [
[
[
128,
130
],
[
307,
309
]
],
[
[
132,
136
],
[
578,
582
],
[
592,
596
],
[
633,
637
],
[
691,
695
]
],
[
[
143,
148
],
[
196,
201
],
[
657,
662
]
],
[
[
189,
193
],
[
272,
276
],
[
370,
374
]
],
[
[
297,
304
],
[
392,
399
],
[
424,
431
]
],
[
[
381,
385
],
[
610,
614
],
[
652,
656
]
],
[
[
412,
421
],
[
450,
459
],
[
481,
490
],
[
521,
530
]
],
[
[
469,
478
],
[
521,
530
]
],
[
[
509,
518
],
[
561,
570
],
[
597,
606
],
[
638,
647
]
],
[
[
672,
676
],
[
743,
747
],
[
760,
764
]
],
[
[
736,
739
],
[
765,
768
],
[
841,
844
]
],
[
[
753,
757
],
[
777,
781
],
[
817,
821
]
],
[
[
803,
807
],
[
857,
861
]
]
] |
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{
'includes': [
'../../common.gypi',
],
'targets': [
{
'target_name': 'ionbase_test',
'includes': [
'../../dev/test_target.gypi',
],
'sources' : [
'allocatable_test.cc',
'allocationmanager_test.cc',
'allocator_test.cc',
'array2_test.cc',
'calllist_test.cc',
'circularbuffer_test.cc',
'datacontainer_test.cc',
'datetime_test.cc',
'enumhelper_test.cc',
'fullallocationtracker_test.cc',
'functioncall_test.cc',
'incompletetype.cc',
'incompletetype.h',
'indexmap_test.cc',
'invalid_test.cc',
'logchecker_test.cc',
'logging_test.cc',
'memoryzipstream_test.cc',
'notifier_test.cc',
'nulllogentrywriter_test.cc',
'once_test.cc',
'readwritelock_test.cc',
'scalarsequence_test.cc',
'scopedallocation_test.cc',
'serialize_test.cc',
'setting_test.cc',
'settingmanager_test.cc',
'sharedptr_test.cc',
'signal_test.cc',
'spinmutex_test.cc',
'staticsafedeclare_test.cc',
'stlallocator_test.cc',
'stringutils_test.cc',
'threadlocalobject_test.cc',
'threadspawner_test.cc',
'type_structs_test.cc',
'utf8iterator_test.cc',
'variant_test.cc',
'varianttyperesolver_test.cc',
'vectordatacontainer_test.cc',
'weakreferent_test.cc',
'workerpool_test.cc',
'zipassetmanager_test.cc',
],
'conditions': [
# Threads don't exist in asmjs, so remove those tests.
['OS == "asmjs"', {
'sources!': [
'readwritelock_test.cc',
'threadlocalobject_test.cc',
'threadspawner_test.cc',
'workerpool_test.cc',
],
}],
],
'dependencies' : [
'base_tests_assets',
'<(ion_dir)/base/base.gyp:ionbase_for_tests',
'<(ion_dir)/external/gtest.gyp:iongtest_safeallocs',
'<(ion_dir)/port/port.gyp:ionport',
],
},
{
'target_name': 'base_tests_assets',
'type': 'static_library',
'includes': [
'../../dev/zipasset_generator.gypi',
],
'sources' : [
'data/zipasset.iad',
],
'dependencies' : [
'<(ion_dir)/port/port.gyp:ionport',
],
},
],
}
| [] |
""" TSP SIMULATED ANNEALING """
# Imports
import math
import numpy as np
# read data from file
f = open("TSP-configurations/eil51.tsp.txt", "r")
# f = open("TSP-configurations/a280.tsp.txt", "r")
# f = open("TSP-configurations/pcb442.tsp.txt", "r")
network = f.readlines()[6:-1]
# create dictionary to store coordinates
nodes = dict()
# split data and put in dict
for node in network:
node = list(map(int, (list(filter(None, node.rstrip().rsplit(' '))))))
nodes[node[0]] = node[1:]
# calculate distance between 2 nodes
def get_distance(dictionary, city1, city2):
x = dictionary[city1][0] - dictionary[city2][0]
y = dictionary[city1][1] - dictionary[city2][1]
return math.sqrt(x**2 + y**2)
# def get_distance(dictionary, city1, city2):
# x = dictionary[city1][0][0] - dictionary[city2][0][0]
# y = dictionary[city1][0][1] - dictionary[city2][0][1]
# return math.sqrt(x**2 + y**2)
# calculate the total distance
def total_distance(tour, dictionary):
distance = 0
for i in range(len(tour)-1):
distance += get_distance(dictionary, tour[i], tour[i+1])
return distance
# add nearest neighbors in order of nearest to most far
for node in range(1,len(nodes)+1):
t_dict = dict()
tour = [i for i in nodes.keys()]
tour.remove(node)
for j in tour:
t_dict[j] = get_distance(nodes, node, j)
nodes[node].append(sorted(t_dict.items(), key=lambda x: x[1]))
print(nodes)
def SA(coordinates, tour, temp, coolingdown, mlength, swap = False, start_node=True):
if start_node == True:
a, c = [tour[0]], [tour[0]]
b = tour[1:]
np.random.shuffle(b)
tour = a + b + c
else:
np.random.shuffle(tour)
print(f'\nInitial solution: {tour}\n')
# Initial costs
costs = total_distance(tour, coordinates)
for i in range(1000): # Parameter
print(i, 'cost=', costs)
temp = coolingdown(temp)
if temp == 0:
print("Temperature of 0 reached")
return tour, costs
for j in range(mlength): # Parameter
if swap == True:
# Exchange two coordinates and get a candidate solution solution
c1, c2 = np.random.randint(1, len(tour)-1, size = 2)
# Swap coordinates
tour[c1], tour[c2] = tour[c2], tour[c1]
else:
randindex = np.random.randint(1,len(tour)-2)
randcity = np.random.randint(2,len(tour)-1)
c2_i = tour.index(randcity)
tour.remove(randcity)
# print(f'city {c2} removed out of index {c2_i}')
tour.insert(randindex, randcity)
# get the new costs
cost_n = total_distance(tour, coordinates)
# replace old costs if new costs is less
if cost_n < costs:
costs = cost_n
else:
# Generate random probability
x = np.random.uniform()
# If prob < formula accept candidate solution
if x < min(1, math.exp(-(cost_n-costs)/temp)):
costs = cost_n
else:
if swap == True:
# Swap back to prior solution
tour[c1], tour[c2] = tour[c2], tour[c1]
else:
tour.remove(randcity)
tour.insert(c2_i, randcity)
return tour, costs, temp
def candidate_solution():
return
def cooling(temp):
"""
Cooling down function
:param temp: (float) temperature
:return: (float) new temperature
"""
return temp - np.log(temp)
Temperature = 1000 # Parameter
MCL = 500 # Markov Chain Length (inner loop)
# Get node names
initial_tour = [i for i in nodes.keys()]
print(SA(nodes, initial_tour, Temperature, cooling, MCL)) | [
[
[
50,
54
],
[
692,
696
],
[
3093,
3097
]
],
[
[
62,
73
],
[
1630,
1632
],
[
1694,
1696
],
[
2220,
2222
],
[
2402,
2404
],
[
2462,
2464
],
[
2980,
2982
],
[
3685,
3687
]
],
[
[
97,
98
],
[
262,
263
]
],
[
[
252,
259
],
[
381,
388
]
],
[
[
324,
329
],
[
469,
474
],
[
1207,
1212
],
[
1261,
1266
],
[
1350,
1355
],
[
1371,
1376
],
[
1441,
1446
],
[
3819,
3824
],
[
3843,
3848
]
],
[
[
373,
377
],
[
434,
438
]
],
[
[
394,
398
],
[
486,
490
],
[
475,
479
]
],
[
[
537,
549
],
[
1337,
1349
],
[
1059,
1071
]
],
[
[
954,
968
],
[
1799,
1813
],
[
2746,
2760
]
],
[
[
1187,
1191
],
[
1291,
1295
],
[
1357,
1361
],
[
1377,
1381
]
],
[
[
1222,
1228
],
[
1325,
1331
],
[
1397,
1403
]
],
[
[
1242,
1246
],
[
1279,
1283
],
[
1311,
1315
]
],
[
[
1306,
1307
],
[
1363,
1364
],
[
1332,
1333
]
],
[
[
1455,
1457
],
[
3840,
3842
]
],
[
[
3497,
3515
]
],
[
[
3535,
3542
],
[
3877,
3884
]
],
[
[
3699,
3710
],
[
3864,
3875
]
],
[
[
3730,
3733
],
[
3886,
3889
]
],
[
[
3792,
3804
],
[
3850,
3862
]
]
] |
from itertools import combinations
__author__ = "\n".join(['Ben Edwards (bedwards@cs.unm.edu)',
'Huston Hedinger (h@graphalchemist.com)',
'Dan Schult (dschult@colgate.edu)'])
__all__ = ['dispersion']
def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
r"""Calculate dispersion between `u` and `v` in `G`.
A link between two actors (`u` and `v`) has a high dispersion when their
mutual ties (`s` and `t`) are not well connected with each other.
Parameters
----------
G : graph
A NetworkX graph.
u : node, optional
The source for the dispersion score (e.g. ego node of the network).
v : node, optional
The target of the dispersion score if specified.
normalized : bool
If True (default) normalize by the embededness of the nodes (u and v).
Returns
-------
nodes : dictionary
If u (v) is specified, returns a dictionary of nodes with dispersion
score for all "target" ("source") nodes. If neither u nor v is
specified, returns a dictionary of dictionaries for all nodes 'u' in the
graph with a dispersion score for each node 'v'.
Notes
-----
This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical
usage would be to run dispersion on the ego network $G_u$ if $u$ were
specified. Running :func:`dispersion` with neither $u$ nor $v$ specified
can take some time to complete.
References
----------
.. [1] Romantic Partnerships and the Dispersion of Social Ties:
A Network Analysis of Relationship Status on Facebook.
Lars Backstrom, Jon Kleinberg.
https://arxiv.org/pdf/1310.6753v1.pdf
"""
def _dispersion(G_u, u, v):
"""dispersion for all nodes 'v' in a ego network G_u of node 'u'"""
u_nbrs = set(G_u[u])
ST = set(n for n in G_u[v] if n in u_nbrs)
set_uv = set([u, v])
# all possible ties of connections that u and b share
possib = combinations(ST, 2)
total = 0
for (s, t) in possib:
# neighbors of s that are in G_u, not including u and v
nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
# s and t are not directly connected
if t not in nbrs_s:
# s and t do not share a connection
if nbrs_s.isdisjoint(G_u[t]):
# tick for disp(u, v)
total += 1
# neighbors that u and v share
embededness = len(ST)
if normalized:
if embededness + c != 0:
norm_disp = ((total + b)**alpha) / (embededness + c)
else:
norm_disp = (total + b)**alpha
dispersion = norm_disp
else:
dispersion = total
return dispersion
if u is None:
# v and u are not specified
if v is None:
results = dict((n, {}) for n in G)
for u in G:
for v in G[u]:
results[u][v] = _dispersion(G, u, v)
# u is not specified, but v is
else:
results = dict.fromkeys(G[v], {})
for u in G[v]:
results[u] = _dispersion(G, v, u)
else:
# u is specified with no target v
if v is None:
results = dict.fromkeys(G[u], {})
for v in G[u]:
results[v] = _dispersion(G, u, v)
# both u and v are specified
else:
results = _dispersion(G, u, v)
return results
| [
[
[
22,
34
],
[
2060,
2072
]
],
[
[
36,
46
]
],
[
[
225,
232
]
],
[
[
256,
266
]
]
] |
#!/usr/bin/env python3
# 2017, Georg Sauthoff <mail@gms.tf>, GPLv3
import sys
def skip_comments(lines):
state = 0
for line in lines:
n = len(line)
l = ''
p = 0
while p < n:
if state == 0:
a = line.find('//', p)
b = line.find('/*', p)
if a > -1 and (a < b or b == -1):
l += line[p:a]
p = n
elif b > -1 and (b < a or a == -1):
l += line[p:b]
p = b+2
state = 1
else:
l += line[p:]
p = n
elif state == 1:
a = line.rfind('*/', p)
if a == -1:
p = n
else:
p = a + 2
state = 0
yield l
def cond_lines(lines):
state = 0
pcnt = 0
for nr, line in enumerate(lines, 1):
if not line:
continue
n = len(line)
p = 0
do_yield = False
while p < n:
if state == 0:
p = line.find('if', p)
if p == -1:
p = n
continue
if (p == 0 or not line[p-1].isalpha()) \
and (p+2 == len(line) or not line[p+2].isalpha()):
do_yield = True
state = 1
p += 2
elif state == 1:
do_yield = True
p = line.find('(', p)
if p == -1:
p = n
else:
p += 1
state = 2
pcnt = 1
elif state == 2:
do_yield = True
for p in range(p, n):
if line[p] == '(':
pcnt += 1
elif line[p] == ')':
pcnt -= 1
if not pcnt:
state = 0
break
p += 1
if do_yield:
yield nr
def cond_lines_from_file(filename):
with open(filename) as f:
yield from cond_lines(skip_comments(f))
def filter_lcov_trace(lines):
nrs = set()
for line in lines:
if line.startswith('SF:'):
nrs = set(cond_lines_from_file(line[3:-1]))
elif line.startswith('BRDA:'):
xs = line[5:].split(',')
nr = int(xs[0]) if xs else 0
if nr not in nrs:
continue
yield line
def filter_lcov_trace_file(s_filename, d_file):
with open(s_filename) as f:
for l in filter_lcov_trace(f):
print(l, end='', file=d_file)
if __name__ == '__main__':
#for l in cond_lines_from_file(sys.argv[1]):
# print(l)
filter_lcov_trace_file(sys.argv[1], sys.stdout)
#with open(sys.argv[1]) as f:
# for l in skip_comments(f):
# print(l)
| [
[
[
80,
83
],
[
2399,
2402
],
[
2412,
2415
]
],
[
[
91,
104
],
[
1789,
1802
]
],
[
[
720,
730
],
[
1778,
1788
]
],
[
[
1701,
1721
],
[
1926,
1946
]
],
[
[
1814,
1831
],
[
2219,
2236
]
],
[
[
2130,
2152
],
[
2376,
2398
]
]
] |
from .models import User
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import (
RedirectView,
UpdateView,
DetailView,
CreateView,
ListView,
)
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
| [
[
[
20,
24
]
],
[
[
57,
71
],
[
287,
301
]
],
[
[
111,
129
],
[
327,
345
],
[
507,
525
],
[
867,
885
]
],
[
[
154,
161
],
[
627,
634
],
[
973,
980
]
],
[
[
201,
213
],
[
887,
899
]
],
[
[
219,
229
],
[
527,
537
]
],
[
[
235,
245
],
[
347,
357
]
],
[
[
251,
261
]
],
[
[
267,
275
]
],
[
[
280,
284
],
[
373,
377
],
[
553,
557
],
[
742,
746
]
],
[
[
312,
326
],
[
459,
473
]
],
[
[
440,
456
]
],
[
[
492,
506
],
[
817,
831
]
],
[
[
798,
814
]
],
[
[
850,
866
],
[
1069,
1085
]
],
[
[
1048,
1066
]
]
] |
"""The moon component."""
| [] |
import http
from typing import Optional
from fastapi import FastAPI, Path, Query
app = FastAPI()
@app.api_route("/api_route")
def non_operation():
return {"message": "Hello World"}
def non_decorated_route():
return {"message": "Hello World"}
app.add_api_route("/non_decorated_route", non_decorated_route)
@app.get("/text")
def get_text():
return "Hello World"
@app.get("/path/{item_id}")
def get_id(item_id):
return item_id
@app.get("/path/str/{item_id}")
def get_str_id(item_id: str):
return item_id
@app.get("/path/int/{item_id}")
def get_int_id(item_id: int):
return item_id
@app.get("/path/float/{item_id}")
def get_float_id(item_id: float):
return item_id
@app.get("/path/bool/{item_id}")
def get_bool_id(item_id: bool):
return item_id
@app.get("/path/param/{item_id}")
def get_path_param_id(item_id: str = Path()):
return item_id
@app.get("/path/param-required/{item_id}")
def get_path_param_required_id(item_id: str = Path()):
return item_id
@app.get("/path/param-minlength/{item_id}")
def get_path_param_min_length(item_id: str = Path(min_length=3)):
return item_id
@app.get("/path/param-maxlength/{item_id}")
def get_path_param_max_length(item_id: str = Path(max_length=3)):
return item_id
@app.get("/path/param-min_maxlength/{item_id}")
def get_path_param_min_max_length(item_id: str = Path(max_length=3, min_length=2)):
return item_id
@app.get("/path/param-gt/{item_id}")
def get_path_param_gt(item_id: float = Path(gt=3)):
return item_id
@app.get("/path/param-gt0/{item_id}")
def get_path_param_gt0(item_id: float = Path(gt=0)):
return item_id
@app.get("/path/param-ge/{item_id}")
def get_path_param_ge(item_id: float = Path(ge=3)):
return item_id
@app.get("/path/param-lt/{item_id}")
def get_path_param_lt(item_id: float = Path(lt=3)):
return item_id
@app.get("/path/param-lt0/{item_id}")
def get_path_param_lt0(item_id: float = Path(lt=0)):
return item_id
@app.get("/path/param-le/{item_id}")
def get_path_param_le(item_id: float = Path(le=3)):
return item_id
@app.get("/path/param-lt-gt/{item_id}")
def get_path_param_lt_gt(item_id: float = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge/{item_id}")
def get_path_param_le_ge(item_id: float = Path(le=3, ge=1)):
return item_id
@app.get("/path/param-lt-int/{item_id}")
def get_path_param_lt_int(item_id: int = Path(lt=3)):
return item_id
@app.get("/path/param-gt-int/{item_id}")
def get_path_param_gt_int(item_id: int = Path(gt=3)):
return item_id
@app.get("/path/param-le-int/{item_id}")
def get_path_param_le_int(item_id: int = Path(le=3)):
return item_id
@app.get("/path/param-ge-int/{item_id}")
def get_path_param_ge_int(item_id: int = Path(ge=3)):
return item_id
@app.get("/path/param-lt-gt-int/{item_id}")
def get_path_param_lt_gt_int(item_id: int = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge-int/{item_id}")
def get_path_param_le_ge_int(item_id: int = Path(le=3, ge=1)):
return item_id
@app.get("/query")
def get_query(query):
return f"foo bar {query}"
@app.get("/query/optional")
def get_query_optional(query=None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int")
def get_query_type(query: int):
return f"foo bar {query}"
@app.get("/query/int/optional")
def get_query_type_optional(query: Optional[int] = None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int/default")
def get_query_type_int_default(query: int = 10):
return f"foo bar {query}"
@app.get("/query/param")
def get_query_param(query=Query(default=None)):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/param-required")
def get_query_param_required(query=Query()):
return f"foo bar {query}"
@app.get("/query/param-required/int")
def get_query_param_required_type(query: int = Query()):
return f"foo bar {query}"
@app.get("/enum-status-code", status_code=http.HTTPStatus.CREATED)
def get_enum_status_code():
return "foo bar"
| [
[
[
7,
11
],
[
4074,
4078
]
],
[
[
31,
39
],
[
3428,
3436
]
],
[
[
61,
68
],
[
89,
96
]
],
[
[
70,
74
],
[
866,
870
],
[
985,
989
],
[
1104,
1108
],
[
1235,
1239
],
[
1374,
1378
],
[
1506,
1510
],
[
1618,
1622
],
[
1728,
1732
],
[
1838,
1842
],
[
1950,
1954
],
[
2060,
2064
],
[
2176,
2180
],
[
2298,
2302
],
[
2420,
2424
],
[
2536,
2540
],
[
2652,
2656
],
[
2768,
2772
],
[
2890,
2894
],
[
3018,
3022
]
],
[
[
76,
81
],
[
3693,
3698
],
[
3863,
3868
],
[
3990,
3995
]
],
[
[
83,
86
],
[
102,
105
],
[
258,
261
],
[
324,
327
],
[
385,
388
],
[
455,
458
],
[
538,
541
],
[
621,
624
],
[
710,
713
],
[
796,
799
],
[
897,
900
],
[
1016,
1019
],
[
1147,
1150
],
[
1278,
1281
],
[
1431,
1434
],
[
1541,
1544
],
[
1653,
1656
],
[
1763,
1766
],
[
1873,
1876
],
[
1985,
1988
],
[
2095,
2098
],
[
2217,
2220
],
[
2339,
2342
],
[
2455,
2458
],
[
2571,
2574
],
[
2687,
2690
],
[
2803,
2806
],
[
2931,
2934
],
[
3059,
3062
],
[
3132,
3135
],
[
3275,
3278
],
[
3362,
3365
],
[
3531,
3534
],
[
3643,
3646
],
[
3795,
3798
],
[
3906,
3909
],
[
4033,
4036
]
],
[
[
134,
147
]
],
[
[
195,
214
],
[
300,
319
]
],
[
[
345,
353
]
],
[
[
416,
422
]
],
[
[
490,
500
]
],
[
[
573,
583
]
],
[
[
658,
670
]
],
[
[
746,
757
]
],
[
[
833,
850
]
],
[
[
943,
969
]
],
[
[
1063,
1088
]
],
[
[
1194,
1219
]
],
[
[
1329,
1358
]
],
[
[
1471,
1488
]
],
[
[
1582,
1600
]
],
[
[
1693,
1710
]
],
[
[
1803,
1820
]
],
[
[
1914,
1932
]
],
[
[
2025,
2042
]
],
[
[
2138,
2158
]
],
[
[
2260,
2280
]
],
[
[
2383,
2404
]
],
[
[
2499,
2520
]
],
[
[
2615,
2636
]
],
[
[
2731,
2752
]
],
[
[
2850,
2874
]
],
[
[
2978,
3002
]
],
[
[
3081,
3090
]
],
[
[
3163,
3181
]
],
[
[
3301,
3315
]
],
[
[
3397,
3420
]
],
[
[
3565,
3591
]
],
[
[
3671,
3686
]
],
[
[
3832,
3856
]
],
[
[
3947,
3976
]
],
[
[
4103,
4123
]
]
] |
import argparse
import sys
import os
import subprocess
from pathlib import Path
import threading
def main():
# Cd to scripts/build.py directory
os.chdir(os.path.dirname(__file__))
# Initialize parser
parser = argparse.ArgumentParser()
# Adding optional arguments
parser.add_argument("-r", "--remove-build-dir", help = "Remove build directory", action="store_true")
parser.add_argument("-c", "--cryptopp", help = "Install cryptopp with make", action="store_true")
parser.add_argument("-d", "--enet", help = "Install ENet with make", action="store_true")
parser.add_argument("-m", "--make", help = "Make", action="store_true")
parser.add_argument("-n", "--ninja", help = "Make", action="store_true")
parser.add_argument("-u", "--uninstall", help = "Make uninstall", action="store_true")
parser.add_argument("-t", "--tests", help = "Run tests", action="store_true")
parser.add_argument("-e", "--execute", help = "Execute binary after install", action="store_true")
parser.add_argument("-p", "--packinstall", help = "CPack + installation of deb", action="store_true")
parser.add_argument("-s", "--send", help = "Send deb to servers", action="store_true")
# Read arguments from command line
args = parser.parse_args()
# Do not use ELIF, combining options doesn't work then
if args.remove_build_dir:
if os.path.exists(project_path("build")):
subprocess.call('rm -r ' + project_path("build"), shell=True) # suppose we're in ./scripts directory
if args.cryptopp:
subprocess.call('cd ' + project_path("build") + \
' && git clone --recursive https://github.com/weidai11/cryptopp.git' \
' && cd cryptopp' \
' && wget -O CMakeLists.txt https://raw.githubusercontent.com/noloader/cryptopp-cmake/master/CMakeLists.txt' \
' && wget -O cryptopp-config.cmake https://raw.githubusercontent.com/noloader/cryptopp-cmake/master/cryptopp-config.cmake' \
' && mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Debug .. && make && make install', shell=True)
if args.enet:
subprocess.call('cd ' + project_path("build") + \
' && git clone --recursive https://github.com/lsalzman/enet.git' \
' && cd enet' \
' && git checkout e0e7045' \
' && autoreconf -i && ./configure && make && make install', shell=True)
if args.make:
subprocess.call('cd ' + project_path("build") + \
' && cmake -DCMAKE_BUILD_TYPE=Debug ..' \
' && make', shell=True)
if args.ninja:
ninja()
if args.uninstall:
subprocess.call('cd ' + project_path("build") + ' && xargs rm < install_manifest.txt', shell=True)
if args.tests:
subprocess.call('cd ' + project_path("build") + ' && ./tests/libcrowd/tests_crowd --log_level=message \
&& ./tests/liblogin/tests_login --log_level=message', shell=True)
if args.execute:
ninja()
packinstall()
subprocess.call('onze-terminal', shell=True)
if args.packinstall:
ninja()
packinstall()
if args.send:
ips = ["51.158.68.232", "51.15.226.67", "51.15.248.67", "212.47.254.170", "212.47.234.94", "212.47.236.102"]
for ip in ips:
worker(ip)
def ninja():
subprocess.call('cd ' + project_path("build") + \
' && cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Debug ..' \
' && ninja', shell=True)
def packinstall():
subprocess.call('cd ' + project_path("build") + \
' && cpack' \
' && dpkg -i `find . -type f -name *.deb`' \
' && apt-get -f install', shell=True)
def worker(ip):
"""thread worker function"""
work = subprocess.call('cd ' + project_path("build") + \
' && scp `find . -maxdepth 1 -type f -name *.deb` root@' + ip + ':~/', shell=True)
return work
def project_path(sub_dir):
# find the path of the build folder
full_path = str(Path(os.getcwd()).parent / sub_dir)
#full_path = os.path.abspath(os.path.join(os.getcwd(),sub_dir))
if not os.path.exists(full_path):
os.makedirs(full_path)
return full_path
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0) | [
[
[
7,
15
],
[
227,
235
]
],
[
[
23,
26
],
[
4349,
4352
]
],
[
[
34,
36
],
[
4400,
4402
],
[
153,
155
],
[
162,
164
],
[
1398,
1400
],
[
4018,
4020
],
[
4129,
4131
],
[
4164,
4166
]
],
[
[
44,
54
],
[
1449,
1459
],
[
1581,
1591
],
[
2148,
2158
],
[
2456,
2466
],
[
2662,
2672
],
[
2788,
2798
],
[
3037,
3047
],
[
3344,
3354
],
[
3520,
3530
],
[
3764,
3774
]
],
[
[
75,
79
],
[
4013,
4017
]
],
[
[
87,
96
]
],
[
[
102,
106
],
[
4258,
4262
]
],
[
[
3331,
3336
],
[
2623,
2628
],
[
2999,
3004
],
[
3115,
3120
]
],
[
[
3501,
3512
],
[
3015,
3026
],
[
3131,
3142
]
],
[
[
3708,
3714
],
[
3315,
3321
]
],
[
[
3930,
3942
],
[
1413,
1425
],
[
1476,
1488
],
[
1605,
1617
],
[
2172,
2184
],
[
2480,
2492
],
[
2686,
2698
],
[
2812,
2824
],
[
3368,
3380
],
[
3544,
3556
],
[
3788,
3800
]
]
] |
from project.album import Album
class Band:
def __init__(self, name):
self.name = name
self.albums = []
def add_album(self, album: Album):
if album in self.albums:
return f"Band {self.name} already has {album.name} in their library."
self.albums.append(album)
return f"Band {self.name} has added their newest album {album.name}."
def remove_album(self, album_name):
for album in self.albums:
if album.name == album_name:
if album.published:
return f"Album has been published. It cannot be removed."
self.albums.remove(album)
return f"Album {album.name} has been removed."
return f"Album {album_name} is not found."
def details(self):
msg = f"Band {self.name}\n"
msg += "\n".join([album.details() for album in self.albums])
return msg
| [
[
[
26,
31
],
[
158,
163
]
],
[
[
40,
44
]
]
] |
import ipaddress
import socket
import os
def execute_sysctl_command(params):
print("-> sysctl "+params)
os.system('sysctl ' + params)
def ip_string_to_unsigned_int(ip):
ip_ = 0
bytes_ = ip.split(".")
if len(bytes_) == 4:
ip_ = socket.htonl((int(bytes_[0]) << 24) + (int(bytes_[1]) << 16) + (int(bytes_[2]) << 8) + int(bytes_[3]))
return ip_
def generate_sysctl_params_string_apiv03(rules):
sysctl_params = ""
for rule in rules:
src_ip="0"
dst_ip="0"
src_port="0"
dst_port="0"
weight="0"
if "src_ip" in rule:
src_ip=str(ip_string_to_unsigned_int(rule["src_ip"]))
if "dst_ip" in rule:
dst_ip=str(ip_string_to_unsigned_int(rule["dst_ip"]))
if "src_port" in rule:
src_port=str(rule["src_port"])
if "dst_port" in rule:
src_port=str(rule["dst_port"])
if "weight" in rule:
weight=str(rule["weight"])
sysctl_params = sysctl_params+src_ip+" "+dst_ip+" "+weight+" "+src_port+" "+dst_port+" "
sysctl_params = sysctl_params.strip()
return sysctl_params
def generate_sysctl_params_string(ips_weights_dictionary):
sysctl_params = ""
for ip in ips_weights_dictionary:
value=ips_weights_dictionary[ip]
sysctl_params = sysctl_params + str(ip_string_to_unsigned_int(ip)) + " " + str(value) + " "
sysctl_params = sysctl_params.strip()
return sysctl_params
def generate_sysctl_port_params_string(ips_weights_dictionary):
sysctl_params = ""
for ip in ips_weights_dictionary:
value=ips_weights_dictionary[ip]
sysctl_params = sysctl_params + str(ip_string_to_unsigned_int(ip)) + " " + str(socket.htons(value)) + " "
sysctl_params = sysctl_params.strip()
return sysctl_params
def execute_sysctl_read_command(params):
stream = os.popen("sysctl " + params)
return stream.readline()
def set_local_interfaces_rules(rules):
sysctl_params = generate_sysctl_params_string_apiv03(rules)
execute_sysctl_command("-w net.mptcp.mptcp_wrr_li_weights=\"" + sysctl_params + "\"")
def set_local_interfaces_weights(ips_weights_dictionary):
sysctl_params = generate_sysctl_params_string(ips_weights_dictionary)
execute_sysctl_command("-w net.mptcp.mptcp_wrr_li_weights=\"" + sysctl_params + "\"")
def set_remote_interfaces_weights(ips_weights_dictionary):
sysctl_params = generate_sysctl_params_string(ips_weights_dictionary)
execute_sysctl_command("-w net.mptcp.mptcp_wrr_ri_weights=\"" + sysctl_params + "\"")
def set_remote_interfaces_ports(ips_ports_dictionary):
sysctl_params = generate_sysctl_port_params_string(ips_ports_dictionary)
execute_sysctl_command("-w net.mptcp.mptcp_wrr_ri_port=\"" + sysctl_params + "\"")
def set_local_interfaces_ports(ips_ports_dictionary):
sysctl_params = generate_sysctl_port_params_string(ips_ports_dictionary)
execute_sysctl_command("-w net.mptcp.mptcp_wrr_li_port=\"" + sysctl_params + "\"")
def get_remote_interfaces_weights():
return get_sysctl_pair_ip_value("net.mptcp.mptcp_wrr_ri_weights", default_value=1)
def get_srtt_values():
return get_sysctl_pair_ip_value("net.mptcp.mptcp_wrr_srtt", default_value=-1)
def get_cwnd_values():
return get_sysctl_pair_ip_value("net.mptcp.mptcp_wrr_cwnd", default_value=-1)
def get_sysctl_pair_ip_value(sysctl_param, default_value=-1):
values = []
output = execute_sysctl_read_command(sysctl_param)
# output="net.mptcp.mptcp_wrr_li_weights = 335544330 0 1 0 0 0 0 0"
words = output.split("=")
params = words[1].replace('\t', ' ')
params = params.strip(" \t\n")
params = params.split(' ')
params = list(filter(''.__ne__, params)) # filters all "" occurrences (__ne__ => not equal)
print(params)
if len(params) < 2:
values = []
else:
for i in range(0, len(params) - 1, 5):
if params[i] != "0":
value = default_value
if i + 2 < len(params):
value = params[i + 2]
ip = format(ipaddress.IPv4Address(int(params[i].strip())))
values.append({"src_ip":ip, "weight":value})
return values
def get_local_interfaces_weights():
# weights = {}
# output = execute_sysctl_read_command("net.mptcp.mptcp_wrr_li_weights")
# # output="net.mptcp.mptcp_wrr_li_weights = 335544330 1 0 0 0 0 0 0"
#
# words = output.split("=")
#
# params = words[1].replace('\t', ' ')
# params = params.strip(" \t\n")
# params = params.split(' ')
# params = list(filter(''.__ne__, params)) # filters all "" occurrences (__ne__ => not equal)
#
# if len(params) < 2:
# weights = {}
# else:
# for i in range(0, len(params) - 1, 2):
# if params[i] != "0":
# weight = 1
# if i + 1 < len(params):
# weight = params[i + 1]
#
# ip = format(ipaddress.IPv4Address(int(params[i].strip())))
# weights[ip] = weight
#
# return weights
return get_sysctl_pair_ip_value("net.mptcp.mptcp_wrr_li_weights", default_value=1)
| [
[
[
7,
16
],
[
4177,
4186
]
],
[
[
24,
30
],
[
258,
264
],
[
1760,
1766
]
],
[
[
38,
40
],
[
113,
115
],
[
1913,
1915
]
],
[
[
46,
68
],
[
2079,
2101
],
[
2302,
2324
],
[
2527,
2549
],
[
2750,
2772
],
[
2969,
2991
]
],
[
[
148,
173
],
[
632,
657
],
[
726,
751
],
[
1370,
1395
],
[
1717,
1742
]
],
[
[
381,
417
],
[
2031,
2067
]
],
[
[
1162,
1191
],
[
2244,
2273
],
[
2469,
2498
]
],
[
[
1500,
1534
],
[
2689,
2723
],
[
2908,
2942
]
],
[
[
1863,
1890
],
[
3486,
3513
]
],
[
[
1976,
2002
]
],
[
[
2170,
2198
]
],
[
[
2394,
2423
]
],
[
[
2618,
2645
]
],
[
[
2838,
2864
]
],
[
[
3057,
3086
]
],
[
[
3183,
3198
]
],
[
[
3290,
3305
]
],
[
[
3399,
3423
],
[
3101,
3125
],
[
3213,
3237
],
[
3320,
3344
],
[
5236,
5260
]
],
[
[
4313,
4341
]
]
] |
"""
Created on 21 de mar de 2018
@author: clebson
"""
from hidrocomp.files.fileRead import FileRead
class Nasa(FileRead):
"""
class files read: National Aeronautics and
Space Administration - NASA
"""
source = "NASA"
extension = "hdf5"
def __init__(self, params):
pass
| [
[
[
92,
100
],
[
114,
122
]
],
[
[
109,
113
]
]
] |
import logging
import os
import sys
import json
import time
import re
UNCENSORED_LOGGING = os.getenv("UNCENSORED_LOGGING")
LOG_CENSOR = [
{ "regex": r"(eyJ0e[A-Za-z0-9-_]{10})[A-Za-z0-9-_]*\.[A-Za-z0-9-_]*\.[A-Za-z0-9-_]*([A-Za-z0-9-_]{10})",
"replace": "\\g<1>XXX<JWTTOKEN>XXX\\g<2>",
"description": "X-out JWT Token payload"
},
{ "regex": r"(EDL-[A-Za-z0-9]+)[A-Za-z0-9]{40}([A-Za-z0-9]{10})",
"replace": "\\g<1>XXX<EDLTOKEN>XXX\\g<2>",
"description": "X-out non-JWT EDL token"
},
{ "regex": r"(Basic [A-Za-z0-9-_]{5})[A-Za-z0-9]*([A-Za-z0-9-_]{5})",
"replace": "\\g<1>XXX<BASICAUTH>XXX\\g<2>",
"description": "X-out Basic Auth Credentials"
},
{ "regex": r"([^A-Za-z0-9/+=][A-Za-z0-9/+=]{5})[A-Za-z0-9/+=]{30}([A-Za-z0-9/+=]{5}[^A-Za-z0-9/+=])",
"replace": "\\g<1>XXX<AWSSECRET>XXX\\g<2>",
"description": "X-out AWS Secret"
}
]
def return_timing_object(**timing):
timing_object = { "service": "Unknown", "endpoint": "Unknown", "method": "GET", "duration": 0, "unit": "milliseconds"}
timing_object.update({k.lower(): v for k,v in timing.items()})
return {"timing":timing_object }
def duration(time_in):
# Return the time duration in milliseconds
delta = time.time() - time_in
return(float("{:.2f}".format(delta*1000)))
def filter_log_credentials(msg):
if UNCENSORED_LOGGING:
return msg
for regex in LOG_CENSOR:
result = re.sub(regex["regex"], regex["replace"], msg, 0, re.MULTILINE)
if result:
msg = str(result)
return msg
def reformat_for_json(msg):
if type(msg) is dict:
return json.dumps(msg).replace("'", '"')
if '{' in msg:
try:
json_obj = json.loads(msg)
return json.dumps(json_obj).replace("'", '"')
except json.decoder.JSONDecodeError:
# Not JSON.
pass
return '"{0}"'.format(msg)
class CustomLogFilter(logging.Filter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params = { 'build_vers': os.getenv("BUILD_VERSION", "NOBUILD"),
'maturity': os.getenv('MATURITY', 'DEV'),
'request_id': None,
'origin_request_id': None,
'user_id': None,
'route': None
}
def filter(self, record):
record.msg = filter_log_credentials(reformat_for_json(record.msg))
record.build_vers = self.params['build_vers']
record.maturity = self.params['maturity']
record.request_id = self.params['request_id']
record.origin_request_id = self.params['origin_request_id']
record.user_id = self.params['user_id']
record.route = self.params['route']
return True
def update(self, **context):
for key in context:
self.params.update({key: context[key]})
custom_log_filter = CustomLogFilter()
def log_context(**context):
custom_log_filter.update(**context)
def get_log():
loglevel = os.getenv('LOGLEVEL', 'INFO')
logtype = os.getenv('LOGTYPE', 'json')
if logtype == 'flat':
log_fmt_str = "%(levelname)s: %(message)s (%(filename)s line " + \
"%(lineno)d/%(build_vers)s/%(maturity)s) - " + \
"RequestId: %(request_id)s; OriginRequestId: %(origin_request_id)s; user_id: %(user_id)s; route: %(route)s"
else:
log_fmt_str = '{"level": "%(levelname)s", ' + \
'"RequestId": "%(request_id)s", ' + \
'"OriginRequestId": "%(origin_request_id)s", ' + \
'"message": %(message)s, ' + \
'"maturity": "%(maturity)s", ' + \
'"user_id": "%(user_id)s", ' + \
'"route": "%(route)s", ' + \
'"build": "%(build_vers)s", ' + \
'"filename": "%(filename)s", ' + \
'"lineno": %(lineno)d } '
logger = logging.getLogger()
for h in logger.handlers:
logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
h.setFormatter(logging.Formatter(log_fmt_str))
h.addFilter(custom_log_filter)
logger.addHandler(h)
logger.setLevel(getattr(logging, loglevel))
if os.getenv("QUIETBOTO", 'TRUE').upper() == 'TRUE':
# BOTO, be quiet plz
logging.getLogger('boto3').setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('nose').setLevel(logging.ERROR)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('s3transfer').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('connectionpool').setLevel(logging.ERROR)
return logger
| [
[
[
7,
14
],
[
2045,
2052
],
[
4154,
4161
],
[
4246,
4253
],
[
4299,
4306
],
[
4419,
4426
],
[
4534,
4541
],
[
4570,
4577
],
[
4593,
4600
],
[
4632,
4639
],
[
4655,
4662
],
[
4690,
4697
],
[
4713,
4720
],
[
4757,
4764
],
[
4780,
4787
],
[
4821,
4828
],
[
4844,
4851
],
[
4882,
4889
],
[
4905,
4912
],
[
4950,
4957
]
],
[
[
22,
24
],
[
92,
94
],
[
2185,
2187
],
[
2260,
2262
],
[
3188,
3190
],
[
3232,
3234
],
[
4447,
4449
]
],
[
[
32,
35
],
[
4268,
4271
]
],
[
[
43,
47
],
[
1742,
1746
],
[
1831,
1835
],
[
1866,
1870
],
[
1920,
1924
]
],
[
[
55,
59
],
[
1327,
1331
]
],
[
[
67,
69
],
[
1531,
1533
],
[
1580,
1582
]
],
[
[
71,
89
],
[
1441,
1459
]
],
[
[
125,
135
],
[
1502,
1512
]
],
[
[
985,
1005
]
],
[
[
1249,
1257
]
],
[
[
1405,
1427
],
[
2540,
2562
]
],
[
[
1677,
1694
],
[
2563,
2580
]
],
[
[
2029,
2044
],
[
3067,
3082
]
],
[
[
3047,
3064
],
[
3119,
3136
],
[
4347,
4364
]
],
[
[
3091,
3102
]
],
[
[
3161,
3168
]
]
] |
import logging
import platform
from localstack import config
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services.infra import do_run, log_startup_message, start_proxy_for_service
from localstack.services.install import INSTALL_PATH_KMS_BINARY_PATTERN
from localstack.utils.common import get_arch, get_free_tcp_port, wait_for_port_open
LOG = logging.getLogger(__name__)
def start_kms_local(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.service_port("kms")
backend_port = get_free_tcp_port()
kms_binary = INSTALL_PATH_KMS_BINARY_PATTERN.replace(
"<arch>", f"{platform.system().lower()}-{get_arch()}"
)
log_startup_message("KMS")
start_proxy_for_service("kms", port, backend_port, update_listener)
env_vars = {
"PORT": str(backend_port),
"KMS_REGION": config.DEFAULT_REGION,
"REGION": config.DEFAULT_REGION,
"KMS_ACCOUNT_ID": TEST_AWS_ACCOUNT_ID,
"ACCOUNT_ID": TEST_AWS_ACCOUNT_ID,
}
if config.dirs.data:
env_vars["KMS_DATA_PATH"] = config.dirs.data
result = do_run(kms_binary, asynchronous, env_vars=env_vars)
wait_for_port_open(backend_port)
return result
| [
[
[
7,
14
],
[
369,
376
]
],
[
[
22,
30
],
[
655,
663
]
],
[
[
55,
61
],
[
510,
516
],
[
879,
885
],
[
920,
926
],
[
1046,
1052
],
[
1100,
1106
]
],
[
[
95,
114
],
[
969,
988
],
[
1012,
1031
]
],
[
[
153,
159
],
[
1130,
1136
]
],
[
[
161,
180
],
[
706,
725
]
],
[
[
182,
205
],
[
737,
760
]
],
[
[
246,
277
],
[
593,
624
]
],
[
[
314,
322
],
[
683,
691
]
],
[
[
324,
341
],
[
556,
573
]
],
[
[
343,
361
],
[
1186,
1204
]
],
[
[
363,
366
]
],
[
[
403,
418
]
]
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('networks', '0011_add_dns_servers_group'),
]
operations = [
migrations.RemoveField(
model_name='network',
name='dns_servers',
),
]
| [
[
[
47,
63
]
],
[
[
87,
97
],
[
124,
134
],
[
255,
265
]
],
[
[
99,
105
]
],
[
[
114,
123
]
]
] |
import io
import os
import sys
import subprocess
from test import support
import unittest
import unittest.test
from .test_result import BufferedWriter
class Test_TestProgram(unittest.TestCase):
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
# Horrible white box test
def testNoExit(self):
result = object()
test = object()
class FakeRunner(object):
def run(self, test):
self.test = test
return result
runner = FakeRunner()
oldParseArgs = unittest.TestProgram.parseArgs
def restoreParseArgs():
unittest.TestProgram.parseArgs = oldParseArgs
unittest.TestProgram.parseArgs = lambda *args: None
self.addCleanup(restoreParseArgs)
def removeTest():
del unittest.TestProgram.test
unittest.TestProgram.test = test
self.addCleanup(removeTest)
program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2)
self.assertEqual(program.result, result)
self.assertEqual(runner.test, test)
self.assertEqual(program.verbosity, 2)
class FooBar(unittest.TestCase):
def testPass(self):
pass
def testFail(self):
raise AssertionError
def testError(self):
1/0
@unittest.skip('skipping')
def testSkipped(self):
raise AssertionError
@unittest.expectedFailure
def testExpectedFailure(self):
raise AssertionError
@unittest.expectedFailure
def testUnexpectedSuccess(self):
pass
class FooBarLoader(unittest.TestLoader):
"""Test loader that returns a suite containing FooBar."""
def loadTestsFromModule(self, module):
return self.suiteClass(
[self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
def loadTestsFromNames(self, names, module):
return self.suiteClass(
[self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
def test_defaultTest_with_string(self):
class FakeRunner(object):
def run(self, test):
self.test = test
return True
old_argv = sys.argv
sys.argv = ['faketest']
runner = FakeRunner()
program = unittest.TestProgram(testRunner=runner, exit=False,
defaultTest='unittest.test',
testLoader=self.FooBarLoader())
sys.argv = old_argv
self.assertEqual(('unittest.test',), program.testNames)
def test_defaultTest_with_iterable(self):
class FakeRunner(object):
def run(self, test):
self.test = test
return True
old_argv = sys.argv
sys.argv = ['faketest']
runner = FakeRunner()
program = unittest.TestProgram(
testRunner=runner, exit=False,
defaultTest=['unittest.test', 'unittest.test2'],
testLoader=self.FooBarLoader())
sys.argv = old_argv
self.assertEqual(['unittest.test', 'unittest.test2'],
program.testNames)
def test_NonExit(self):
stream = BufferedWriter()
program = unittest.main(exit=False,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=stream),
testLoader=self.FooBarLoader())
self.assertTrue(hasattr(program, 'result'))
out = stream.getvalue()
self.assertIn('\nFAIL: testFail ', out)
self.assertIn('\nERROR: testError ', out)
self.assertIn('\nUNEXPECTED SUCCESS: testUnexpectedSuccess ', out)
expected = ('\n\nFAILED (failures=1, errors=1, skipped=1, '
'expected failures=1, unexpected successes=1)\n')
self.assertTrue(out.endswith(expected))
def test_Exit(self):
stream = BufferedWriter()
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=stream),
exit=True,
testLoader=self.FooBarLoader())
out = stream.getvalue()
self.assertIn('\nFAIL: testFail ', out)
self.assertIn('\nERROR: testError ', out)
self.assertIn('\nUNEXPECTED SUCCESS: testUnexpectedSuccess ', out)
expected = ('\n\nFAILED (failures=1, errors=1, skipped=1, '
'expected failures=1, unexpected successes=1)\n')
self.assertTrue(out.endswith(expected))
def test_ExitAsDefault(self):
stream = BufferedWriter()
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=stream),
testLoader=self.FooBarLoader())
out = stream.getvalue()
self.assertIn('\nFAIL: testFail ', out)
self.assertIn('\nERROR: testError ', out)
self.assertIn('\nUNEXPECTED SUCCESS: testUnexpectedSuccess ', out)
expected = ('\n\nFAILED (failures=1, errors=1, skipped=1, '
'expected failures=1, unexpected successes=1)\n')
self.assertTrue(out.endswith(expected))
class InitialisableProgram(unittest.TestProgram):
exit = False
result = None
verbosity = 1
defaultTest = None
tb_locals = False
testRunner = None
testLoader = unittest.defaultTestLoader
module = '__main__'
progName = 'test'
test = 'test'
def __init__(self, *args):
pass
RESULT = object()
class FakeRunner(object):
initArgs = None
test = None
raiseError = 0
def __init__(self, **kwargs):
FakeRunner.initArgs = kwargs
if FakeRunner.raiseError:
FakeRunner.raiseError -= 1
raise TypeError
def run(self, test):
FakeRunner.test = test
return RESULT
class TestCommandLineArgs(unittest.TestCase):
def setUp(self):
self.program = InitialisableProgram()
self.program.createTests = lambda: None
FakeRunner.initArgs = None
FakeRunner.test = None
FakeRunner.raiseError = 0
def testVerbosity(self):
program = self.program
for opt in '-q', '--quiet':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 0)
for opt in '-v', '--verbose':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 2)
def testBufferCatchFailfast(self):
program = self.program
for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
('catch', 'catchbreak')):
setattr(program, attr, None)
program.parseArgs([None])
self.assertIs(getattr(program, attr), False)
false = []
setattr(program, attr, false)
program.parseArgs([None])
self.assertIs(getattr(program, attr), false)
true = [42]
setattr(program, attr, true)
program.parseArgs([None])
self.assertIs(getattr(program, attr), true)
short_opt = '-%s' % arg[0]
long_opt = '--%s' % arg
for opt in short_opt, long_opt:
setattr(program, attr, None)
program.parseArgs([None, opt])
self.assertIs(getattr(program, attr), True)
setattr(program, attr, False)
with support.captured_stderr() as stderr, \
self.assertRaises(SystemExit) as cm:
program.parseArgs([None, opt])
self.assertEqual(cm.exception.args, (2,))
setattr(program, attr, True)
with support.captured_stderr() as stderr, \
self.assertRaises(SystemExit) as cm:
program.parseArgs([None, opt])
self.assertEqual(cm.exception.args, (2,))
def testWarning(self):
"""Test the warnings argument"""
# see #10535
class FakeTP(unittest.TestProgram):
def parseArgs(self, *args, **kw): pass
def runTests(self, *args, **kw): pass
warnoptions = sys.warnoptions[:]
try:
sys.warnoptions[:] = []
# no warn options, no arg -> default
self.assertEqual(FakeTP().warnings, 'default')
# no warn options, w/ arg -> arg value
self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
sys.warnoptions[:] = ['somevalue']
# warn options, no arg -> None
# warn options, w/ arg -> arg value
self.assertEqual(FakeTP().warnings, None)
self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
finally:
sys.warnoptions[:] = warnoptions
def testRunTestsRunnerClass(self):
program = self.program
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.warnings = 'warnings'
program.runTests()
self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
'failfast': 'failfast',
'buffer': 'buffer',
'tb_locals': False,
'warnings': 'warnings'})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsRunnerInstance(self):
program = self.program
program.testRunner = FakeRunner()
FakeRunner.initArgs = None
program.runTests()
# A new FakeRunner should not have been instantiated
self.assertIsNone(FakeRunner.initArgs)
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def test_locals(self):
program = self.program
program.testRunner = FakeRunner
program.parseArgs([None, '--locals'])
self.assertEqual(True, program.tb_locals)
program.runTests()
self.assertEqual(FakeRunner.initArgs, {'buffer': False,
'failfast': False,
'tb_locals': True,
'verbosity': 1,
'warnings': None})
def testRunTestsOldRunnerClass(self):
program = self.program
# Two TypeErrors are needed to fall all the way back to old-style
# runners - one to fail tb_locals, one to fail buffer etc.
FakeRunner.raiseError = 2
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.test = 'test'
program.runTests()
# If initialising raises a type error it should be retried
# without the new keyword arguments
self.assertEqual(FakeRunner.initArgs, {})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testCatchBreakInstallsHandler(self):
module = sys.modules['unittest.main']
original = module.installHandler
def restore():
module.installHandler = original
self.addCleanup(restore)
self.installed = False
def fakeInstallHandler():
self.installed = True
module.installHandler = fakeInstallHandler
program = self.program
program.catchbreak = True
program.testRunner = FakeRunner
program.runTests()
self.assertTrue(self.installed)
def _patch_isfile(self, names, exists=True):
def isfile(path):
return path in names
original = os.path.isfile
os.path.isfile = isfile
def restore():
os.path.isfile = original
self.addCleanup(restore)
def testParseArgsFileNames(self):
# running tests with filenames instead of module names
program = self.program
argv = ['progname', 'foo.py', 'bar.Py', 'baz.PY', 'wing.txt']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
# note that 'wing.txt' is not a Python file so the name should
# *not* be converted to a module name
expected = ['foo', 'bar', 'baz', 'wing.txt']
self.assertEqual(program.testNames, expected)
def testParseArgsFilePaths(self):
program = self.program
argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
expected = ['foo.bar.baz', 'green.red']
self.assertEqual(program.testNames, expected)
def testParseArgsNonExistentFiles(self):
program = self.program
argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
self._patch_isfile([])
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNames, argv[1:])
def testParseArgsAbsolutePathsThatCanBeConverted(self):
cur_dir = os.getcwd()
program = self.program
def _join(name):
return os.path.join(cur_dir, name)
argv = ['progname', _join('foo/bar/baz.py'), _join('green\\red.py')]
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
expected = ['foo.bar.baz', 'green.red']
self.assertEqual(program.testNames, expected)
def testParseArgsAbsolutePathsThatCannotBeConverted(self):
program = self.program
# even on Windows '/...' is considered absolute by os.path.abspath
argv = ['progname', '/foo/bar/baz.py', '/green/red.py']
self._patch_isfile(argv)
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNames, argv[1:])
# it may be better to use platform specific functions to normalise paths
# rather than accepting '.PY' and '\' as file separator on Linux / Mac
# it would also be better to check that a filename is a valid module
# identifier (we have a regex for this in loader.py)
# for invalid filenames should we raise a useful error rather than
# leaving the current error message (import of filename fails) in place?
def testParseArgsSelectedTestNames(self):
program = self.program
argv = ['progname', '-k', 'foo', '-k', 'bar', '-k', '*pat*']
program.createTests = lambda: None
program.parseArgs(argv)
self.assertEqual(program.testNamePatterns, ['*foo*', '*bar*', '*pat*'])
def testSelectedTestNamesFunctionalTest(self):
def run_unittest(args):
p = subprocess.Popen([sys.executable, '-m', 'unittest'] + args,
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, cwd=os.path.dirname(__file__))
with p:
_, stderr = p.communicate()
return stderr.decode()
t = '_test_warnings'
self.assertIn('Ran 5 tests', run_unittest([t]))
self.assertIn('Ran 5 tests', run_unittest(['-k', 'TestWarnings', t]))
self.assertIn('Ran 5 tests', run_unittest(['discover', '-p', '*_test*', '-k', 'TestWarnings']))
self.assertIn('Ran 1 test ', run_unittest(['-k', 'f', t]))
self.assertIn('Ran 5 tests', run_unittest(['-k', 't', t]))
self.assertIn('Ran 2 tests', run_unittest(['-k', '*t', t]))
self.assertIn('Ran 5 tests', run_unittest(['-k', '*test_warnings.*Warning*', t]))
self.assertIn('Ran 1 test ', run_unittest(['-k', '*test_warnings.*warning*', t]))
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
9
]
],
[
[
18,
20
],
[
331,
333
],
[
347,
349
],
[
12578,
12580
],
[
12601,
12603
],
[
13985,
13987
],
[
12660,
12662
],
[
14072,
14074
],
[
15777,
15779
]
],
[
[
28,
31
],
[
2738,
2741
],
[
2755,
2758
],
[
3026,
3029
],
[
3305,
3308
],
[
3322,
3325
],
[
3572,
3575
],
[
8866,
8869
],
[
8910,
8913
],
[
9180,
9183
],
[
9464,
9467
],
[
11953,
11956
],
[
15664,
15667
]
],
[
[
39,
49
],
[
15646,
15656
],
[
15729,
15739
],
[
15756,
15766
]
],
[
[
67,
74
],
[
8132,
8139
],
[
8404,
8411
]
],
[
[
82,
90
]
],
[
[
98,
111
],
[
177,
185
],
[
1650,
1658
],
[
1830,
1838
],
[
1929,
1937
],
[
2035,
2043
],
[
2142,
2150
],
[
5836,
5844
],
[
5996,
6004
],
[
6512,
6520
],
[
16586,
16594
],
[
262,
270
],
[
363,
371
],
[
1038,
1046
],
[
1167,
1175
],
[
1338,
1346
],
[
1426,
1434
],
[
2827,
2835
],
[
3394,
3402
],
[
3780,
3788
],
[
3898,
3906
],
[
4568,
4576
],
[
4635,
4643
],
[
5265,
5273
],
[
5332,
5340
],
[
8720,
8728
],
[
1113,
1121
],
[
1304,
1312
]
],
[
[
137,
151
],
[
3745,
3759
],
[
4488,
4502
],
[
5185,
5199
]
],
[
[
160,
176
],
[
2357,
2373
],
[
2518,
2534
]
],
[
[
5815,
5835
],
[
6577,
6597
]
],
[
[
6132,
6138
],
[
6477,
6483
],
[
10230,
10236
],
[
10616,
10622
],
[
11882,
11888
]
],
[
[
6157,
6167
],
[
6275,
6285
],
[
6315,
6325
],
[
6350,
6360
],
[
6439,
6449
],
[
6656,
6666
],
[
6691,
6701
],
[
6722,
6732
],
[
9598,
9608
],
[
9813,
9823
],
[
10167,
10177
],
[
10342,
10352
],
[
10363,
10373
],
[
10506,
10516
],
[
10553,
10563
],
[
10713,
10723
],
[
10872,
10882
],
[
11396,
11406
],
[
11451,
11461
],
[
11769,
11779
],
[
11819,
11829
],
[
12371,
12381
]
],
[
[
6492,
6511
]
]
] |
__author__ = 'swhite'
| [
[
[
0,
10
]
]
] |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Main WSGI app for blob-management tool.
"""
import webapp2
from app import routes
APP = webapp2.WSGIApplication(routes.ROUTES)
| [
[
[
652,
659
],
[
691,
698
]
],
[
[
677,
683
],
[
715,
721
]
],
[
[
685,
688
]
]
] |
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import cv2
import numpy as np
from typing import Any, Dict
from openvino.model_zoo.model_api.models import SegmentationModel
from openvino.model_zoo.model_api.models.types import NumericalValue
from ote_sdk.utils.segmentation_utils import create_hard_prediction_from_soft_prediction
class BlurSegmentation(SegmentationModel):
__model__ = 'blur_segmentation'
def __init__(self, model_adapter, configuration=None, preload=False):
super().__init__(model_adapter, configuration, preload)
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'soft_threshold': NumericalValue(default_value=0.5, min=0.0, max=1.0),
'blur_strength': NumericalValue(value_type=int, default_value=1, min=0, max=25)
})
return parameters
def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):
predictions = outputs[self.output_blob_name].squeeze()
soft_prediction = np.transpose(predictions, axes=(1, 2, 0))
hard_prediction = create_hard_prediction_from_soft_prediction(
soft_prediction=soft_prediction,
soft_threshold=self.soft_threshold,
blur_strength=self.blur_strength
)
hard_prediction = cv2.resize(hard_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST)
soft_prediction = cv2.resize(soft_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST)
metadata['soft_predictions'] = soft_prediction
return hard_prediction
| [
[
[
589,
592
],
[
1875,
1878
],
[
1958,
1961
],
[
2003,
2006
],
[
2086,
2089
]
],
[
[
600,
611
],
[
1458,
1460
],
[
1587,
1589
]
],
[
[
631,
634
],
[
1491,
1494
]
],
[
[
636,
640
],
[
1448,
1452
],
[
1481,
1485
]
],
[
[
690,
707
],
[
891,
908
]
],
[
[
762,
776
],
[
1229,
1243
],
[
1311,
1325
]
],
[
[
822,
865
],
[
1656,
1699
]
],
[
[
874,
890
]
]
] |
from django.db import models
from django.template import Library
from ..models import MetaTag
from ..utils import truncate_language_code_from_path, check_caching_enabled
register = Library()
@register.inclusion_tag('metatags/includes/metatags.html', takes_context=True)
def include_metatags(context, model_instance=None, default_title='', default_keywords='', default_description=''):
is_caching_enabled = check_caching_enabled()
meta_tags_context = {
'title': default_title,
'keywords': default_keywords,
'description': default_description,
}
if isinstance(model_instance, models.Model):
# Try to retrieve attached meta tags for a model instance.
if is_caching_enabled:
# Try fetch meta tags from cache.
meta_tags = MetaTag.objects.fetch_from_cache_attached_to_model_instance(model_instance)
else:
meta_tags = MetaTag.objects.get_attached_to_model_instance(model_instance)
else:
# Try to retrieve meta tags by an URL-path.
url_path = truncate_language_code_from_path(context['request'].path_info)
if is_caching_enabled:
meta_tags = MetaTag.objects.fetch_from_cache_attached_to_url_path(url_path)
else:
meta_tags = MetaTag.objects.get_attached_to_url_path(url_path)
if meta_tags is not None:
meta_tags_context.update((key, value) for key, value in meta_tags._asdict().items() if value)
return {'meta_tags': meta_tags_context}
| [
[
[
22,
28
],
[
618,
624
]
],
[
[
57,
64
],
[
183,
190
]
],
[
[
87,
94
],
[
801,
808
],
[
915,
922
],
[
1177,
1184
],
[
1279,
1286
]
],
[
[
115,
147
],
[
1059,
1091
]
],
[
[
149,
170
],
[
414,
435
]
],
[
[
172,
180
],
[
196,
204
]
],
[
[
278,
294
]
]
] |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CLI interface for nova management.
"""
import collections
import functools
import os
import re
import sys
import time
import traceback
import typing as ty
from urllib import parse as urlparse
from dateutil import parser as dateutil_parser
from keystoneauth1 import exceptions as ks_exc
from neutronclient.common import exceptions as neutron_client_exc
from os_brick.initiator import connector
import os_resource_classes as orc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import uuidutils
import prettytable
from sqlalchemy.engine import url as sqla_url
from nova.cmd import common as cmd_common
from nova.compute import api
from nova.compute import instance_actions
from nova.compute import rpcapi
import nova.conf
from nova import config
from nova import context
from nova.db import constants as db_const
from nova.db.main import api as db
from nova.db import migration
from nova import exception
from nova.i18n import _
from nova.network import constants
from nova.network import neutron as neutron_api
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import compute_node as compute_node_obj
from nova.objects import fields as obj_fields
from nova.objects import host_mapping as host_mapping_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_mapping as instance_mapping_obj
from nova.objects import pci_device as pci_device_obj
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as virtual_interface_obj
from nova import rpc
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova import version
from nova.virt.libvirt import machine_type_utils
from nova.volume import cinder
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Keep this list sorted and one entry per line for readability.
_EXTRA_DEFAULT_LOG_LEVELS = [
'nova=ERROR',
'oslo_concurrency=INFO',
'oslo_db=INFO',
'oslo_policy=INFO',
'oslo.privsep=ERROR',
'os_brick=ERROR',
]
# Consts indicating whether allocations need to be healed by creating them or
# by updating existing allocations.
_CREATE = 'create'
_UPDATE = 'update'
# Decorators for actions
args = cmd_common.args
action_description = cmd_common.action_description
def mask_passwd_in_url(url):
parsed = urlparse.urlparse(url)
safe_netloc = re.sub(':.*@', ':****@', parsed.netloc)
new_parsed = urlparse.ParseResult(
parsed.scheme, safe_netloc,
parsed.path, parsed.params,
parsed.query, parsed.fragment)
return urlparse.urlunparse(new_parsed)
def format_dict(dct, dict_property="Property", dict_value='Value',
sort_key=None):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param dict_value: header label for the value (second) column
:param sort_key: key used for sorting the dict
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
v = str(v)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, str) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
return encodeutils.safe_encode(pt.get_string()).decode()
class DbCommands(object):
"""Class for managing the main database."""
# NOTE(danms): These functions are called with a DB context and a
# count, which is the maximum batch size requested by the
# user. They must be idempotent. At most $count records should be
# migrated. The function must return a tuple of (found, done). The
# found value indicates how many unmigrated/candidate records existed in
# the database prior to the migration (either total, or up to the
# $count limit provided), and a nonzero found value may tell the user
# that there is still work to do. The done value indicates whether
# or not any records were actually migrated by the function. Thus
# if both (found, done) are nonzero, work was done and some work
# remains. If found is nonzero and done is zero, some records are
# not migratable (or don't need migrating), but all migrations that can
# complete have finished.
# NOTE(stephenfin): These names must be unique
online_migrations = (
# Added in Pike
quotas_obj.migrate_quota_limits_to_api_db,
# Added in Pike
quotas_obj.migrate_quota_classes_to_api_db,
# Added in Queens
db.migration_migrate_to_uuid,
# Added in Queens
block_device_obj.BlockDeviceMapping.populate_uuids,
# Added in Rocky
# NOTE(tssurya): This online migration is going to be backported to
# Queens and Pike since instance.avz of instances before Pike
# need to be populated if it was not specified during boot time.
instance_obj.populate_missing_availability_zones,
# Added in Rocky
instance_mapping_obj.populate_queued_for_delete,
# Added in Stein
compute_node_obj.migrate_empty_ratio,
# Added in Stein
virtual_interface_obj.fill_virtual_interface_list,
# Added in Stein
instance_mapping_obj.populate_user_id,
# Added in Victoria
pci_device_obj.PciDevice.populate_dev_uuids,
)
@args('--local_cell', action='store_true',
help='Only sync db in the local cell: do not attempt to fan-out '
'to all cells')
@args('version', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None, local_cell=False):
"""Sync the database up to the most recent version."""
if not local_cell:
ctxt = context.RequestContext()
# NOTE(mdoff): Multiple cells not yet implemented. Currently
# fanout only looks for cell0.
try:
cell_mapping = objects.CellMapping.get_by_uuid(
ctxt, objects.CellMapping.CELL0_UUID,
)
with context.target_cell(ctxt, cell_mapping) as cctxt:
migration.db_sync(version, context=cctxt)
except exception.CellMappingNotFound:
msg = _(
'WARNING: cell0 mapping not found - not syncing cell0.'
)
print(msg)
except Exception as e:
msg = _(
'ERROR: Could not access cell0.\n'
'Has the nova_api database been created?\n'
'Has the nova_cell0 database been created?\n'
'Has "nova-manage api_db sync" been run?\n'
'Has "nova-manage cell_v2 map_cell0" been run?\n'
'Is [api_database]/connection set in nova.conf?\n'
'Is the cell0 database connection URL correct?\n'
'Error: %s'
)
print(msg % str(e))
return 1
return migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version())
@args('--max_rows', type=int, metavar='<number>', dest='max_rows',
help='Maximum number of deleted rows to archive. Defaults to 1000. '
'Note that this number does not include the corresponding '
'rows, if any, that are removed from the API database for '
'deleted instances.')
@args('--before', metavar='<date>',
help=('Archive rows that have been deleted before this date. '
'Accepts date strings in the default format output by the '
'``date`` command, as well as ``YYYY-MM-DD [HH:mm:ss]``.'))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Print how many rows were archived per table.')
@args('--until-complete', action='store_true', dest='until_complete',
default=False,
help=('Run continuously until all deleted rows are archived. Use '
'max_rows as a batch size for each iteration.'))
@args('--purge', action='store_true', dest='purge', default=False,
help='Purge all data from shadow tables after archive completes')
@args('--all-cells', action='store_true', dest='all_cells',
default=False, help='Run command across all cells.')
@args('--task-log', action='store_true', dest='task_log', default=False,
help=('Also archive ``task_log`` table records. Note that '
'``task_log`` records are never deleted, so archiving them '
'will move all of the ``task_log`` records up to now into the '
'shadow tables. It is recommended to also specify the '
'``--before`` option to avoid races for those consuming '
'``task_log`` record data via the '
'``/os-instance_usage_audit_log`` API (example: Telemetry).'))
@args('--sleep', type=int, metavar='<seconds>', dest='sleep',
help='The amount of time in seconds to sleep between batches when '
'``--until-complete`` is used. Defaults to 0.')
def archive_deleted_rows(
self, max_rows=1000, verbose=False,
until_complete=False, purge=False,
before=None, all_cells=False, task_log=False, sleep=0,
):
"""Move deleted rows from production tables to shadow tables.
Returns 0 if nothing was archived, 1 if some number of rows were
archived, 2 if max_rows is invalid, 3 if no connection could be
established to the API DB, 4 if before date is invalid. If automating,
this should be run continuously while the result
is 1, stopping at 0.
"""
max_rows = int(max_rows)
if max_rows < 0:
print(_("Must supply a positive value for max_rows"))
return 2
if max_rows > db_const.MAX_INT:
print(_('max rows must be <= %(max_value)d') %
{'max_value': db_const.MAX_INT})
return 2
ctxt = context.get_admin_context()
try:
# NOTE(tssurya): This check has been added to validate if the API
# DB is reachable or not as this is essential for purging the
# related API database records of the deleted instances.
cell_mappings = objects.CellMappingList.get_all(ctxt)
except db_exc.CantStartEngineError:
print(_('Failed to connect to API DB so aborting this archival '
'attempt. Please check your config file to make sure that '
'[api_database]/connection is set and run this '
'command again.'))
return 3
if before:
try:
before_date = dateutil_parser.parse(before, fuzzy=True)
except ValueError as e:
print(_('Invalid value for --before: %s') % e)
return 4
else:
before_date = None
table_to_rows_archived = {}
if until_complete and verbose:
sys.stdout.write(_('Archiving') + '..') # noqa
interrupt = False
if all_cells:
# Sort first by cell name, then by table:
# +--------------------------------+-------------------------+
# | Table | Number of Rows Archived |
# +--------------------------------+-------------------------+
# | cell0.block_device_mapping | 1 |
# | cell1.block_device_mapping | 1 |
# | cell1.instance_actions | 2 |
# | cell1.instance_actions_events | 2 |
# | cell2.block_device_mapping | 1 |
# | cell2.instance_actions | 2 |
# | cell2.instance_actions_events | 2 |
# ...
def sort_func(item):
cell_name, table = item[0].split('.')
return cell_name, table
print_sort_func = sort_func
else:
cell_mappings = [None]
print_sort_func = None
total_rows_archived = 0
for cell_mapping in cell_mappings:
# NOTE(Kevin_Zheng): No need to calculate limit for each
# cell if until_complete=True.
# We need not adjust max rows to avoid exceeding a specified total
# limit because with until_complete=True, we have no total limit.
if until_complete:
max_rows_to_archive = max_rows
elif max_rows > total_rows_archived:
# We reduce the max rows to archive based on what we've
# archived so far to avoid potentially exceeding the specified
# total limit.
max_rows_to_archive = max_rows - total_rows_archived
else:
break
# If all_cells=False, cell_mapping is None
with context.target_cell(ctxt, cell_mapping) as cctxt:
cell_name = cell_mapping.name if cell_mapping else None
try:
rows_archived = self._do_archive(
table_to_rows_archived,
cctxt,
max_rows_to_archive,
until_complete,
verbose,
before_date,
cell_name,
task_log,
sleep)
except KeyboardInterrupt:
interrupt = True
break
# TODO(melwitt): Handle skip/warn for unreachable cells. Note
# that cell_mappings = [None] if not --all-cells
total_rows_archived += rows_archived
if until_complete and verbose:
if interrupt:
print('.' + _('stopped')) # noqa
else:
print('.' + _('complete')) # noqa
if verbose:
if table_to_rows_archived:
print(format_dict(
table_to_rows_archived,
dict_property=_('Table'),
dict_value=_('Number of Rows Archived'),
sort_key=print_sort_func,
))
else:
print(_('Nothing was archived.'))
if table_to_rows_archived and purge:
if verbose:
print(_('Rows were archived, running purge...'))
self.purge(purge_all=True, verbose=verbose, all_cells=all_cells)
# NOTE(danms): Return nonzero if we archived something
return int(bool(table_to_rows_archived))
def _do_archive(
self, table_to_rows_archived, cctxt, max_rows,
until_complete, verbose, before_date, cell_name, task_log, sleep,
):
"""Helper function for archiving deleted rows for a cell.
This will archive deleted rows for a cell database and remove the
associated API database records for deleted instances.
:param table_to_rows_archived: Dict tracking the number of rows
archived by <cell_name>.<table name>. Example:
{'cell0.instances': 2,
'cell1.instances': 5}
:param cctxt: Cell-targeted nova.context.RequestContext if archiving
across all cells
:param max_rows: Maximum number of deleted rows to archive
:param until_complete: Whether to run continuously until all deleted
rows are archived
:param verbose: Whether to print how many rows were archived per table
:param before_date: Archive rows that were deleted before this date
:param cell_name: Name of the cell or None if not archiving across all
cells
:param task_log: Whether to archive task_log table rows
:param sleep: The amount of time in seconds to sleep between batches
when ``until_complete`` is True.
"""
ctxt = context.get_admin_context()
while True:
run, deleted_instance_uuids, total_rows_archived = \
db.archive_deleted_rows(
cctxt, max_rows, before=before_date, task_log=task_log)
for table_name, rows_archived in run.items():
if cell_name:
table_name = cell_name + '.' + table_name
table_to_rows_archived.setdefault(table_name, 0)
table_to_rows_archived[table_name] += rows_archived
if deleted_instance_uuids:
table_to_rows_archived.setdefault(
'API_DB.instance_mappings', 0)
table_to_rows_archived.setdefault(
'API_DB.request_specs', 0)
table_to_rows_archived.setdefault(
'API_DB.instance_group_member', 0)
deleted_mappings = objects.InstanceMappingList.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived[
'API_DB.instance_mappings'] += deleted_mappings
deleted_specs = objects.RequestSpec.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived[
'API_DB.request_specs'] += deleted_specs
deleted_group_members = (
objects.InstanceGroup.destroy_members_bulk(
ctxt, deleted_instance_uuids))
table_to_rows_archived[
'API_DB.instance_group_member'] += deleted_group_members
# If we're not archiving until there is nothing more to archive, we
# have reached max_rows in this cell DB or there was nothing to
# archive.
if not until_complete or not run:
break
if verbose:
sys.stdout.write('.')
# Optionally sleep between batches to throttle the archiving.
time.sleep(sleep)
return total_rows_archived
@args('--before', metavar='<before>', dest='before',
help='If specified, purge rows from shadow tables that are older '
'than this. Accepts date strings in the default format output '
'by the ``date`` command, as well as ``YYYY-MM-DD '
'[HH:mm:ss]``.')
@args('--all', dest='purge_all', action='store_true',
help='Purge all rows in the shadow tables')
@args('--verbose', dest='verbose', action='store_true', default=False,
help='Print information about purged records')
@args('--all-cells', dest='all_cells', action='store_true', default=False,
help='Run against all cell databases')
def purge(self, before=None, purge_all=False, verbose=False,
all_cells=False):
if before is None and purge_all is False:
print(_('Either --before or --all is required'))
return 1
if before:
try:
before_date = dateutil_parser.parse(before, fuzzy=True)
except ValueError as e:
print(_('Invalid value for --before: %s') % e)
return 2
else:
before_date = None
def status(msg):
if verbose:
print('%s: %s' % (identity, msg))
deleted = 0
admin_ctxt = context.get_admin_context()
if all_cells:
try:
cells = objects.CellMappingList.get_all(admin_ctxt)
except db_exc.DBError:
print(_('Unable to get cell list from API DB. '
'Is it configured?'))
return 4
for cell in cells:
identity = _('Cell %s') % cell.identity
with context.target_cell(admin_ctxt, cell) as cctxt:
deleted += db.purge_shadow_tables(
cctxt, before_date, status_fn=status)
else:
identity = _('DB')
deleted = db.purge_shadow_tables(
admin_ctxt, before_date, status_fn=status)
if deleted:
return 0
else:
return 3
def _run_migration(self, ctxt, max_count):
ran = 0
exceptions = False
migrations = {}
for migration_meth in self.online_migrations:
count = max_count - ran
try:
found, done = migration_meth(ctxt, count)
except Exception:
msg = (_("Error attempting to run %(method)s") % dict(
method=migration_meth))
print(msg)
LOG.exception(msg)
exceptions = True
found = done = 0
name = migration_meth.__name__
if found:
print(_('%(total)i rows matched query %(meth)s, %(done)i '
'migrated') % {'total': found,
'meth': name,
'done': done})
# This is the per-migration method result for this batch, and
# _run_migration will either continue on to the next migration,
# or stop if up to this point we've processed max_count of
# records across all migration methods.
migrations[name] = found, done
if max_count is not None:
ran += done
if ran >= max_count:
break
return migrations, exceptions
@args('--max-count', metavar='<number>', dest='max_count',
help='Maximum number of objects to consider')
def online_data_migrations(self, max_count=None):
ctxt = context.get_admin_context()
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
print(_('Must supply a positive value for max_number'))
return 127
else:
unlimited = True
max_count = 50
print(_('Running batches of %i until complete') % max_count)
ran = None
migration_info = {}
exceptions = False
while ran is None or ran != 0:
migrations, exceptions = self._run_migration(ctxt, max_count)
ran = 0
# For each batch of migration method results, build the cumulative
# set of results.
for name in migrations:
migration_info.setdefault(name, (0, 0))
migration_info[name] = (
migration_info[name][0] + migrations[name][0],
migration_info[name][1] + migrations[name][1],
)
ran += migrations[name][1]
if not unlimited:
break
t = prettytable.PrettyTable([_('Migration'),
_('Total Needed'), # Really: Total Found
_('Completed')])
for name in sorted(migration_info.keys()):
info = migration_info[name]
t.add_row([name, info[0], info[1]])
print(t)
# NOTE(imacdonn): In the "unlimited" case, the loop above will only
# terminate when all possible migrations have been effected. If we're
# still getting exceptions, there's a problem that requires
# intervention. In the max-count case, exceptions are only considered
# fatal if no work was done by any other migrations ("not ran"),
# because otherwise work may still remain to be done, and that work
# may resolve dependencies for the failing migrations.
if exceptions and (unlimited or not ran):
print(_("Some migrations failed unexpectedly. Check log for "
"details."))
return 2
# TODO(mriedem): Potentially add another return code for
# "there are more migrations, but not completable right now"
return ran and 1 or 0
class ApiDbCommands(object):
"""Class for managing the api database."""
def __init__(self):
pass
@args('version', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version, database='api')
def version(self):
"""Print the current database version."""
print(migration.db_version(database='api'))
class CellV2Commands(object):
"""Commands for managing cells v2."""
def _validate_transport_url(self, transport_url, warn_about_none=True):
if not transport_url:
if not CONF.transport_url:
if warn_about_none:
print(_(
'Must specify --transport-url if '
'[DEFAULT]/transport_url is not set in the '
'configuration file.'))
return None
print(_('--transport-url not provided in the command line, '
'using the value [DEFAULT]/transport_url from the '
'configuration file'))
transport_url = CONF.transport_url
try:
messaging.TransportURL.parse(conf=CONF,
url=objects.CellMapping.format_mq_url(
transport_url))
except (messaging.InvalidTransportURL, ValueError) as e:
print(_('Invalid transport URL: %s') % str(e))
return None
return transport_url
def _validate_database_connection(
self, database_connection, warn_about_none=True):
if not database_connection:
if not CONF.database.connection:
if warn_about_none:
print(_(
'Must specify --database_connection if '
'[database]/connection is not set in the '
'configuration file.'))
return None
print(_('--database_connection not provided in the command line, '
'using the value [database]/connection from the '
'configuration file'))
return CONF.database.connection
return database_connection
def _non_unique_transport_url_database_connection_checker(self, ctxt,
cell_mapping, transport_url, database_connection):
for cell in objects.CellMappingList.get_all(ctxt):
if cell_mapping and cell.uuid == cell_mapping.uuid:
# If we're looking for a specific cell, then don't check
# that one for same-ness to allow idempotent updates
continue
if (cell.database_connection == database_connection or
cell.transport_url == transport_url):
print(_('The specified transport_url and/or '
'database_connection combination already exists '
'for another cell with uuid %s.') % cell.uuid)
return True
return False
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help='The transport url for the cell message queue')
def simple_cell_setup(self, transport_url=None):
"""Simple cellsv2 setup.
This simplified command is for use by existing non-cells users to
configure the default environment. Returns 0 if setup is completed (or
has already been done) and 1 if no hosts are reporting (and this cannot
be mapped).
"""
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
ctxt = context.RequestContext()
try:
cell0_mapping = self._map_cell0()
except db_exc.DBDuplicateEntry:
print(_('Cell0 is already setup'))
cell0_mapping = objects.CellMapping.get_by_uuid(
ctxt, objects.CellMapping.CELL0_UUID)
# Run migrations so cell0 is usable
with context.target_cell(ctxt, cell0_mapping) as cctxt:
try:
migration.db_sync(None, context=cctxt)
except db_exc.DBError as ex:
print(_('Unable to sync cell0 schema: %s') % ex)
cell_uuid = self._map_cell_and_hosts(transport_url)
if cell_uuid is None:
# There are no compute hosts which means no cell_mapping was
# created. This should also mean that there are no instances.
return 1
self.map_instances(cell_uuid)
return 0
@args('--database_connection',
metavar='<database_connection>',
help='The database connection url for cell0. '
'This is optional. If not provided, a standard database '
'connection will be used based on the main database connection '
'from the Nova configuration.'
)
def map_cell0(self, database_connection=None):
"""Create a cell mapping for cell0.
cell0 is used for instances that have not been scheduled to any cell.
This generally applies to instances that have encountered an error
before they have been scheduled.
This command creates a cell mapping for this special cell which
requires a database to store the instance data.
Returns 0 if cell0 created successfully or already setup.
"""
try:
self._map_cell0(database_connection=database_connection)
except db_exc.DBDuplicateEntry:
print(_('Cell0 is already setup'))
return 0
def _map_cell0(self, database_connection=None):
"""Faciliate creation of a cell mapping for cell0.
See map_cell0 for more.
"""
def cell0_default_connection():
# If no database connection is provided one is generated
# based on the database connection url.
# The cell0 database will use the same database scheme and
# netloc as the main database, with a related path.
# NOTE(sbauza): The URL has to be RFC1738 compliant in order to
# be usable by sqlalchemy.
connection = CONF.database.connection
# sqlalchemy has a nice utility for parsing database connection
# URLs so we use that here to get the db name so we don't have to
# worry about parsing and splitting a URL which could have special
# characters in the password, which makes parsing a nightmare.
url = sqla_url.make_url(connection)
# TODO(gibi): remove hasattr() conditional in favor of "url.set()"
# when SQLAlchemy 1.4 is the minimum version in requirements
if hasattr(url, "set"):
url = url.set(database=url.database + '_cell0')
else:
# TODO(zzzeek): remove when SQLAlchemy 1.4
# is the minimum version in requirements
url.database = url.database + '_cell0'
return urlparse.unquote(str(url))
dbc = database_connection or cell0_default_connection()
ctxt = context.RequestContext()
# A transport url of 'none://' is provided for cell0. RPC should not
# be used to access cell0 objects. Cells transport switching will
# ignore any 'none' transport type.
cell_mapping = objects.CellMapping(
ctxt, uuid=objects.CellMapping.CELL0_UUID, name="cell0",
transport_url="none:///",
database_connection=dbc)
cell_mapping.create()
return cell_mapping
def _get_and_map_instances(self, ctxt, cell_mapping, limit, marker):
filters = {}
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_filters(
cctxt.elevated(read_deleted='yes'), filters,
sort_key='created_at', sort_dir='asc', limit=limit,
marker=marker)
for instance in instances:
try:
mapping = objects.InstanceMapping(ctxt)
mapping.instance_uuid = instance.uuid
mapping.cell_mapping = cell_mapping
mapping.project_id = instance.project_id
mapping.user_id = instance.user_id
mapping.create()
except db_exc.DBDuplicateEntry:
continue
if len(instances) == 0 or len(instances) < limit:
# We've hit the end of the instances table
marker = None
else:
marker = instances[-1].uuid
return marker
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True,
help='Unmigrated instances will be mapped to the cell with the '
'uuid provided.')
@args('--max-count', metavar='<max_count>', dest='max_count',
help='Maximum number of instances to map. If not set, all instances '
'in the cell will be mapped in batches of 50. If you have a '
'large number of instances, consider specifying a custom value '
'and run the command until it exits with 0.')
@args('--reset', action='store_true', dest='reset_marker',
help='The command will start from the beginning as opposed to the '
'default behavior of starting from where the last run '
'finished')
def map_instances(self, cell_uuid, max_count=None, reset_marker=None):
"""Map instances into the provided cell.
Instances in the nova database of the provided cell (nova database
info is obtained from the nova-api database) will be queried from
oldest to newest and if unmapped, will be mapped to the provided cell.
A max-count can be set on the number of instance to map in a single
run. Repeated runs of the command will start from where the last run
finished so it is not necessary to increase max-count to finish. A
reset option can be passed which will reset the marker, thus making the
command start from the beginning as opposed to the default behavior of
starting from where the last run finished. An exit code of 0 indicates
that all instances have been mapped.
"""
# NOTE(stephenfin): The support for batching in this command relies on
# a bit of a hack. We initially process N instance-cell mappings, where
# N is the value of '--max-count' if provided else 50. To ensure we
# can continue from N on the next iteration, we store a instance-cell
# mapping object with a special name and the UUID of the last
# instance-cell mapping processed (N - 1) in munged form. On the next
# iteration, we search for the special name and unmunge the UUID to
# pick up where we left off. This is done until all mappings are
# processed. The munging is necessary as there's a unique constraint on
# the UUID field and we need something reversable. For more
# information, see commit 9038738d0.
if max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
map_all = False
if max_count < 1:
print(_('Must supply a positive value for max-count'))
return 127
else:
map_all = True
max_count = 50
ctxt = context.RequestContext()
marker_project_id = 'INSTANCE_MIGRATION_MARKER'
# Validate the cell exists, this will raise if not
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# Check for a marker from a previous run
marker_mapping = objects.InstanceMappingList.get_by_project_id(ctxt,
marker_project_id)
if len(marker_mapping) == 0:
marker = None
else:
# There should be only one here
marker = marker_mapping[0].instance_uuid.replace(' ', '-')
if reset_marker:
marker = None
marker_mapping[0].destroy()
next_marker = True
while next_marker is not None:
next_marker = self._get_and_map_instances(ctxt, cell_mapping,
max_count, marker)
marker = next_marker
if not map_all:
break
if next_marker:
# Don't judge me. There's already an InstanceMapping with this UUID
# so the marker needs to be non destructively modified.
next_marker = next_marker.replace('-', ' ')
# This is just the marker record, so set user_id to the special
# marker name as well.
objects.InstanceMapping(ctxt, instance_uuid=next_marker,
project_id=marker_project_id,
user_id=marker_project_id).create()
return 1
return 0
def _map_cell_and_hosts(self, transport_url, name=None, verbose=False):
ctxt = context.RequestContext()
cell_mapping_uuid = cell_mapping = None
# First, try to detect if a CellMapping has already been created
compute_nodes = objects.ComputeNodeList.get_all(ctxt)
if not compute_nodes:
print(_('No hosts found to map to cell, exiting.'))
return None
missing_nodes = set()
for compute_node in compute_nodes:
try:
host_mapping = objects.HostMapping.get_by_host(
ctxt, compute_node.host)
except exception.HostMappingNotFound:
missing_nodes.add(compute_node.host)
else:
if verbose:
print(_(
'Host %(host)s is already mapped to cell %(uuid)s'
) % {'host': host_mapping.host,
'uuid': host_mapping.cell_mapping.uuid})
# Re-using the existing UUID in case there is already a mapping
# NOTE(sbauza): There could be possibly multiple CellMappings
# if the operator provides another configuration file and moves
# the hosts to another cell v2, but that's not really something
# we should support.
cell_mapping_uuid = host_mapping.cell_mapping.uuid
if not missing_nodes:
print(_('All hosts are already mapped to cell(s).'))
return cell_mapping_uuid
# Create the cell mapping in the API database
if cell_mapping_uuid is not None:
cell_mapping = objects.CellMapping.get_by_uuid(
ctxt, cell_mapping_uuid)
if cell_mapping is None:
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt, uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=CONF.database.connection)
cell_mapping.create()
# Pull the hosts from the cell database and create the host mappings
for compute_host in missing_nodes:
host_mapping = objects.HostMapping(
ctxt, host=compute_host, cell_mapping=cell_mapping)
host_mapping.create()
if verbose:
print(cell_mapping_uuid)
return cell_mapping_uuid
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help='The transport url for the cell message queue')
@args('--name', metavar='<cell_name>', help='The name of the cell')
@args('--verbose', action='store_true',
help='Output the cell mapping uuid for any newly mapped hosts.')
def map_cell_and_hosts(self, transport_url=None, name=None, verbose=False):
"""EXPERIMENTAL. Create a cell mapping and host mappings for a cell.
Users not dividing their cloud into multiple cells will be a single
cell v2 deployment and should specify:
nova-manage cell_v2 map_cell_and_hosts --config-file <nova.conf>
Users running multiple cells can add a cell v2 by specifying:
nova-manage cell_v2 map_cell_and_hosts --config-file <cell nova.conf>
"""
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
self._map_cell_and_hosts(transport_url, name, verbose)
# online_data_migrations established a pattern of 0 meaning everything
# is done, 1 means run again to do more work. This command doesn't do
# partial work so 0 is appropriate.
return 0
@args('--uuid', metavar='<instance_uuid>', dest='uuid', required=True,
help=_('The instance UUID to verify'))
@args('--quiet', action='store_true', dest='quiet',
help=_('Do not print anything'))
def verify_instance(self, uuid, quiet=False):
"""Verify instance mapping to a cell.
This command is useful to determine if the cellsv2 environment is
properly setup, specifically in terms of the cell, host, and instance
mapping records required.
This prints one of three strings (and exits with a code) indicating
whether the instance is successfully mapped to a cell (0), is unmapped
due to an incomplete upgrade (1), unmapped due to normally transient
state (2), it is a deleted instance which has instance mapping (3),
or it is an archived instance which still has an instance mapping (4).
"""
def say(string):
if not quiet:
print(string)
ctxt = context.get_admin_context()
try:
mapping = objects.InstanceMapping.get_by_instance_uuid(
ctxt, uuid)
except exception.InstanceMappingNotFound:
say('Instance %s is not mapped to a cell '
'(upgrade is incomplete) or instance '
'does not exist' % uuid)
return 1
if mapping.cell_mapping is None:
say('Instance %s is not mapped to a cell' % uuid)
return 2
else:
with context.target_cell(ctxt, mapping.cell_mapping) as cctxt:
try:
instance = objects.Instance.get_by_uuid(cctxt, uuid)
except exception.InstanceNotFound:
try:
el_ctx = cctxt.elevated(read_deleted='yes')
instance = objects.Instance.get_by_uuid(el_ctx, uuid)
# instance is deleted
if instance:
say('The instance with uuid %s has been deleted.'
% uuid)
say('Execute '
'`nova-manage db archive_deleted_rows` '
'command to archive this deleted '
'instance and remove its instance_mapping.')
return 3
except exception.InstanceNotFound:
# instance is archived
say('The instance with uuid %s has been archived.'
% uuid)
say('However its instance_mapping remains.')
return 4
# instance is alive and mapped to a cell
say('Instance %s is in cell: %s (%s)' % (
uuid,
mapping.cell_mapping.name,
mapping.cell_mapping.uuid))
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help='If provided only this cell will be searched for new hosts to '
'map.')
@args('--verbose', action='store_true',
help=_('Provide detailed output when discovering hosts.'))
@args('--strict', action='store_true',
help=_('Considered successful (exit code 0) only when an unmapped '
'host is discovered. Any other outcome will be considered a '
'failure (non-zero exit code).'))
@args('--by-service', action='store_true', default=False,
dest='by_service',
help=_('Discover hosts by service instead of compute node'))
def discover_hosts(self, cell_uuid=None, verbose=False, strict=False,
by_service=False):
"""Searches cells, or a single cell, and maps found hosts.
When a new host is added to a deployment it will add a service entry
to the db it's configured to use. This command will check the db for
each cell, or a single one if passed in, and map any hosts which are
not currently mapped. If a host is already mapped nothing will be done.
This command should be run once after all compute hosts have been
deployed and should not be run in parallel. When run in parallel,
the commands will collide with each other trying to map the same hosts
in the database at the same time.
"""
def status_fn(msg):
if verbose:
print(msg)
ctxt = context.RequestContext()
try:
hosts = host_mapping_obj.discover_hosts(ctxt, cell_uuid, status_fn,
by_service)
except exception.HostMappingExists as exp:
print(_('ERROR: Duplicate host mapping was encountered. This '
'command should be run once after all compute hosts have '
'been deployed and should not be run in parallel. When '
'run in parallel, the commands will collide with each '
'other trying to map the same hosts in the database at '
'the same time. Error: %s') % exp)
return 2
# discover_hosts will return an empty list if no hosts are discovered
if strict:
return int(not hosts)
@action_description(
_("Add a new cell to nova API database. "
"DB and MQ urls can be provided directly "
"or can be taken from config. The result is cell uuid."))
@args('--name', metavar='<cell_name>', help=_('The name of the cell'))
@args('--database_connection', metavar='<database_connection>',
dest='database_connection',
help=_('The database url for the cell database'))
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help=_('The transport url for the cell message queue'))
@args('--verbose', action='store_true',
help=_('Output the uuid of the created cell'))
@args('--disabled', action='store_true',
help=_('To create a pre-disabled cell.'))
def create_cell(self, name=None, database_connection=None,
transport_url=None, verbose=False, disabled=False):
ctxt = context.get_context()
transport_url = self._validate_transport_url(transport_url)
if not transport_url:
return 1
database_connection = self._validate_database_connection(
database_connection)
if not database_connection:
return 1
if (self._non_unique_transport_url_database_connection_checker(ctxt,
None, transport_url, database_connection)):
return 2
cell_mapping_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(
ctxt,
uuid=cell_mapping_uuid, name=name,
transport_url=transport_url,
database_connection=database_connection,
disabled=disabled)
cell_mapping.create()
if verbose:
print(cell_mapping_uuid)
return 0
@args('--verbose', action='store_true',
help=_('Show sensitive details, such as passwords'))
def list_cells(self, verbose=False):
"""Lists the v2 cells in the deployment.
By default the cell name, uuid, disabled state, masked transport
URL and database connection details are shown. Use the --verbose
option to see transport URL and database connection with their
sensitive details.
"""
cell_mappings = objects.CellMappingList.get_all(
context.get_admin_context())
field_names = [_('Name'), _('UUID'), _('Transport URL'),
_('Database Connection'), _('Disabled')]
t = prettytable.PrettyTable(field_names)
for cell in sorted(cell_mappings,
# CellMapping.name is optional
key=lambda _cell: _cell.name or ''):
fields = [cell.name or '', cell.uuid]
if verbose:
fields.extend([cell.transport_url, cell.database_connection])
else:
fields.extend([
mask_passwd_in_url(cell.transport_url),
mask_passwd_in_url(cell.database_connection)])
fields.extend([cell.disabled])
t.add_row(fields)
print(t)
return 0
@args('--force', action='store_true', default=False,
help=_('Delete hosts and instance_mappings that belong '
'to the cell as well.'))
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell to delete.'))
def delete_cell(self, cell_uuid, force=False):
"""Delete an empty cell by the given uuid.
This command will return a non-zero exit code in the following cases.
* The cell is not found by uuid.
* It has hosts and force is False.
* It has instance mappings and force is False.
If force is True and the cell has hosts and/or instance_mappings, they
are deleted as well (as long as there are no living instances).
Returns 0 in the following cases.
* The empty cell is found and deleted successfully.
* The cell has hosts and force is True then the cell, hosts and
instance_mappings are deleted successfully; if there are no
living instances.
"""
ctxt = context.get_admin_context()
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
# Check to see if there are any HostMappings for this cell.
host_mappings = objects.HostMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
nodes = []
if host_mappings:
if not force:
print(_('There are existing hosts mapped to cell with uuid '
'%s.') % cell_uuid)
return 2
# We query for the compute nodes in the cell,
# so that they can be unmapped.
with context.target_cell(ctxt, cell_mapping) as cctxt:
nodes = objects.ComputeNodeList.get_all(cctxt)
# Check to see if there are any InstanceMappings for this cell.
instance_mappings = objects.InstanceMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
if instance_mappings:
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_all(cctxt)
if instances:
# There are instances in the cell.
print(_('There are existing instances mapped to cell with '
'uuid %s.') % cell_uuid)
return 3
else:
if not force:
# There are no instances in the cell but the records remain
# in the 'instance_mappings' table.
print(_("There are instance mappings to cell with uuid "
"%s, but all instances have been deleted "
"in the cell.") % cell_uuid)
print(_("So execute 'nova-manage db archive_deleted_rows' "
"to delete the instance mappings."))
return 4
# Delete instance_mappings of the deleted instances
for instance_mapping in instance_mappings:
instance_mapping.destroy()
# Unmap the compute nodes so that they can be discovered
# again in future, if needed.
for node in nodes:
node.mapped = 0
node.save()
# Delete hosts mapped to the cell.
for host_mapping in host_mappings:
host_mapping.destroy()
# There are no hosts or instances mapped to the cell so delete it.
cell_mapping.destroy()
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell to update.'))
@args('--name', metavar='<cell_name>', dest='name',
help=_('Set the cell name.'))
@args('--transport-url', metavar='<transport_url>', dest='transport_url',
help=_('Set the cell transport_url. NOTE that running nodes '
'will not see the change until restart!'))
@args('--database_connection', metavar='<database_connection>',
dest='db_connection',
help=_('Set the cell database_connection. NOTE that running nodes '
'will not see the change until restart!'))
@args('--disable', action='store_true', dest='disable',
help=_('Disables the cell. Note that the scheduling will be blocked '
'to this cell until its enabled and followed by a SIGHUP of '
'nova-scheduler service.'))
@args('--enable', action='store_true', dest='enable',
help=_('Enables the cell. Note that this makes a disabled cell '
'available for scheduling after a SIGHUP of the '
'nova-scheduler service'))
def update_cell(self, cell_uuid, name=None, transport_url=None,
db_connection=None, disable=False, enable=False):
"""Updates the properties of a cell by the given uuid.
If the cell is not found by uuid, this command will return an exit
code of 1. If the provided transport_url or/and database_connection
is/are same as another cell, this command will return an exit code
of 3. If the properties cannot be set, this will return 2. If an
attempt is made to disable and enable a cell at the same time, this
command will exit with a return code of 4. If an attempt is made to
disable or enable cell0 this command will exit with a return code of 5.
Otherwise, the exit code will be 0.
NOTE: Updating the transport_url or database_connection fields on
a running system will NOT result in all nodes immediately using the
new values. Use caution when changing these values.
NOTE (tssurya): The scheduler will not notice that a cell has been
enabled/disabled until it is restarted or sent the SIGHUP signal.
"""
ctxt = context.get_admin_context()
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
if name:
cell_mapping.name = name
# Having empty transport_url and db_connection means leaving the
# existing values
transport_url = self._validate_transport_url(
transport_url, warn_about_none=False)
db_connection = self._validate_database_connection(
db_connection, warn_about_none=False)
if (self._non_unique_transport_url_database_connection_checker(ctxt,
cell_mapping, transport_url, db_connection)):
# We use the return code 3 before 2 to avoid changing the
# semantic meanings of return codes.
return 3
if transport_url:
cell_mapping.transport_url = transport_url
if db_connection:
cell_mapping.database_connection = db_connection
if disable and enable:
print(_('Cell cannot be disabled and enabled at the same time.'))
return 4
if disable or enable:
if cell_mapping.is_cell0():
print(_('Cell0 cannot be disabled.'))
return 5
elif disable and not cell_mapping.disabled:
cell_mapping.disabled = True
elif enable and cell_mapping.disabled:
cell_mapping.disabled = False
elif disable and cell_mapping.disabled:
print(_('Cell %s is already disabled') % cell_uuid)
elif enable and not cell_mapping.disabled:
print(_('Cell %s is already enabled') % cell_uuid)
try:
cell_mapping.save()
except Exception as e:
print(_('Unable to update CellMapping: %s') % e)
return 2
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
help=_('The uuid of the cell.'))
def list_hosts(self, cell_uuid=None):
"""Lists the hosts in one or all v2 cells."""
ctxt = context.get_admin_context()
if cell_uuid:
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
host_mappings = objects.HostMappingList.get_by_cell_id(
ctxt, cell_mapping.id)
else:
host_mappings = objects.HostMappingList.get_all(ctxt)
field_names = [_('Cell Name'), _('Cell UUID'), _('Hostname')]
t = prettytable.PrettyTable(field_names)
for host in sorted(host_mappings, key=lambda _host: _host.host):
fields = [host.cell_mapping.name, host.cell_mapping.uuid,
host.host]
t.add_row(fields)
print(t)
return 0
@args('--cell_uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=True, help=_('The uuid of the cell.'))
@args('--host', metavar='<host>', dest='host',
required=True, help=_('The host to delete.'))
def delete_host(self, cell_uuid, host):
"""Delete a host in a cell (host mappings) by the given host name
This command will return a non-zero exit code in the following cases.
* The cell is not found by uuid.
* The host is not found by host name.
* The host is not in the cell.
* The host has instances.
Returns 0 if the host is deleted successfully.
NOTE: The scheduler caches host-to-cell mapping information so when
deleting a host the scheduler may need to be restarted or sent the
SIGHUP signal.
"""
ctxt = context.get_admin_context()
# Find the CellMapping given the uuid.
try:
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 1
try:
host_mapping = objects.HostMapping.get_by_host(ctxt, host)
except exception.HostMappingNotFound:
print(_('The host %s was not found.') % host)
return 2
if host_mapping.cell_mapping.uuid != cell_mapping.uuid:
print(_('The host %(host)s was not found '
'in the cell %(cell_uuid)s.') % {'host': host,
'cell_uuid': cell_uuid})
return 3
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_host(cctxt, host)
try:
nodes = objects.ComputeNodeList.get_all_by_host(cctxt, host)
except exception.ComputeHostNotFound:
nodes = []
if instances:
print(_('There are instances on the host %s.') % host)
return 4
for node in nodes:
node.mapped = 0
node.save()
host_mapping.destroy()
return 0
class PlacementCommands(object):
"""Commands for managing placement resources."""
@staticmethod
def _get_compute_node_uuid(ctxt, instance, node_cache):
"""Find the ComputeNode.uuid for the given Instance
:param ctxt: cell-targeted nova.context.RequestContext
:param instance: the instance to lookup a compute node
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:returns: ComputeNode.uuid for the given instance
:raises: nova.exception.ComputeHostNotFound
"""
if instance.node in node_cache:
return node_cache[instance.node]
compute_node = objects.ComputeNode.get_by_host_and_nodename(
ctxt, instance.host, instance.node)
node_uuid = compute_node.uuid
node_cache[instance.node] = node_uuid
return node_uuid
@staticmethod
def _get_ports(ctxt, instance, neutron):
"""Return the ports that are bound to the instance
:param ctxt: nova.context.RequestContext
:param instance: the instance to return the ports for
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:return: a list of neutron port dict objects
:raise UnableToQueryPorts: If the neutron list ports query fails.
"""
try:
return neutron.list_ports(
ctxt, device_id=instance.uuid,
fields=['id', constants.RESOURCE_REQUEST,
constants.BINDING_PROFILE]
)['ports']
except neutron_client_exc.NeutronClientException as e:
raise exception.UnableToQueryPorts(
instance_uuid=instance.uuid, error=str(e))
@staticmethod
def _has_request_but_no_allocation(port, neutron):
has_res_req = neutron_api.API()._has_resource_request(
context.get_admin_context(), port, neutron)
binding_profile = neutron_api.get_binding_profile(port)
allocation = binding_profile.get(constants.ALLOCATION)
return has_res_req and not allocation
@staticmethod
def _merge_allocations(alloc1, alloc2):
"""Return a new allocation dict that contains the sum of alloc1 and
alloc2.
:param alloc1: a dict in the form of
{
<rp_uuid>: {'resources': {<resource class>: amount,
<resource class>: amount},
<rp_uuid>: {'resources': {<resource class>: amount},
}
:param alloc2: a dict in the same form as alloc1
:return: the merged allocation of alloc1 and alloc2 in the same format
"""
allocations = collections.defaultdict(
lambda: {'resources': collections.defaultdict(int)})
for alloc in [alloc1, alloc2]:
for rp_uuid in alloc:
for rc, amount in alloc[rp_uuid]['resources'].items():
allocations[rp_uuid]['resources'][rc] += amount
return allocations
@staticmethod
def _get_resource_request_from_ports(
ctxt: context.RequestContext,
ports: ty.List[ty.Dict[str, ty.Any]]
) -> ty.Tuple[
ty.Dict[str, ty.List['objects.RequestGroup']],
'objects.RequestLevelParams']:
"""Collect RequestGroups and RequestLevelParams for all ports
:param ctxt: the request context
:param ports: a list of port dicts
:returns: A two tuple where the first item is a dict mapping port
uuids to a list of request groups coming from that port, the
second item is a combined RequestLevelParams object from all ports.
"""
groups = {}
request_level_params = objects.RequestLevelParams()
extended_res_req = (
neutron_api.API().has_extended_resource_request_extension(
ctxt)
)
for port in ports:
resource_request = port.get(constants.RESOURCE_REQUEST)
if extended_res_req:
groups[port['id']] = (
objects.RequestGroup.from_extended_port_request(
ctxt, resource_request
)
)
request_level_params.extend_with(
objects.RequestLevelParams.from_port_request(
resource_request
)
)
else:
# This is the legacy format, only one group per port and no
# request level param support
# TODO(gibi): remove this path once the extended resource
# request extension is mandatory in neutron
groups[port['id']] = [
objects.RequestGroup.from_port_request(
ctxt, port['id'], resource_request
)
]
return groups, request_level_params
@staticmethod
def _get_port_binding_profile_allocation(
ctxt: context.RequestContext,
neutron: neutron_api.ClientWrapper,
port: ty.Dict[str, ty.Any],
request_groups: ty.List['objects.RequestGroup'],
resource_provider_mapping: ty.Dict[str, ty.List[str]],
) -> ty.Dict[str, str]:
"""Generate the value of the allocation key of the port binding profile
based on the provider mapping returned from placement
:param ctxt: the request context
:param neutron: the neutron client
:param port: the port dict from neutron
:param request_groups: the list of RequestGroups object generated from
the port resource request
:param resource_provider_mapping: The dict of request group to resource
provider mapping returned by the Placement allocation candidate
query
:returns: a dict mapping request group ids to resource provider uuids
in the form as Neutron expects in the port binding profile.
"""
if neutron_api.API().has_extended_resource_request_extension(
ctxt, neutron
):
# The extended resource request format also means that a
# port has more than a one request groups.
# Each request group id from the port needs to be mapped to
# a single provider id from the provider mappings. Each
# group from the port is mapped to a numbered request group
# in placement so we can assume that they are mapped to
# a single provider and therefore the provider mapping list
# has a single provider id.
allocation = {
group.requester_id: resource_provider_mapping[
group.requester_id][0]
for group in request_groups
}
else:
# This is the legacy resource request format where a port
# is mapped to a single request group
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
allocation = resource_provider_mapping[
port['id']][0]
return allocation
def _get_port_allocations_to_heal(
self, ctxt, instance, node_cache, placement, neutron, output):
"""Return the needed extra allocation for the ports of the instance.
:param ctxt: nova.context.RequestContext
:param instance: instance to get the port allocations for
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:param output: function that takes a single message for verbose output
:raise UnableToQueryPorts: If the neutron list ports query fails.
:raise nova.exception.ComputeHostNotFound: if compute node of the
instance not found in the db.
:raise PlacementAPIConnectFailure: if placement API cannot be reached
:raise AllocationUpdateFailed: if there is either no allocation
candidate returned from placement for the missing port allocations
or there are more than one candidates making the healing
ambiguous.
:return: A two tuple where the first item is a dict of resources keyed
by RP uuid to be included in the instance allocation dict. The
second item is a list of port dicts to be updated in Neutron.
"""
# We need to heal port allocations for ports that have resource_request
# but do not have an RP uuid in the binding:profile.allocation field.
# We cannot use the instance info_cache to check the binding profile
# as this code needs to be able to handle ports that were attached
# before nova in stein started updating the allocation key in the
# binding:profile.
# In theory a port can be assigned to an instance without it being
# bound to any host (e.g. in case of shelve offload) but
# _heal_allocations_for_instance() already filters out instances that
# are not on any host.
ports_to_heal = [
port for port in self._get_ports(ctxt, instance, neutron)
if self._has_request_but_no_allocation(port, neutron)]
if not ports_to_heal:
# nothing to do, return early
return {}, []
node_uuid = self._get_compute_node_uuid(
ctxt, instance, node_cache)
# NOTE(gibi): We need to handle both legacy and extended resource
# request. So we need to handle ports with multiple request groups
# allocating from multiple providers.
# The logic what we follow here is pretty similar to the logic
# implemented in ComputeManager._allocate_port_resource_for_instance
# for the interface attach case. We just apply it to more then one
# ports here.
request_groups_per_port, req_lvl_params = (
self._get_resource_request_from_ports(ctxt, ports_to_heal)
)
# flatten the list of list of groups
request_groups = [
group
for groups in request_groups_per_port.values()
for group in groups
]
# we can have multiple request groups, it would be enough to restrict
# only one of them to the compute tree but for symmetry we restrict
# all of them
for request_group in request_groups:
request_group.in_tree = node_uuid
# If there are multiple groups then the group_policy is mandatory in
# the allocation candidate query. We can assume that if this instance
# booted successfully then we have the policy in the flavor. If there
# is only one group and therefore no policy then the value of the
# policy in the allocation candidate query is ignored, so we simply
# default it here.
group_policy = instance.flavor.extra_specs.get("group_policy", "none")
rr = scheduler_utils.ResourceRequest.from_request_groups(
request_groups, req_lvl_params, group_policy)
res = placement.get_allocation_candidates(ctxt, rr)
# NOTE(gibi): the get_allocation_candidates method has the
# @safe_connect decorator applied. Such decorator will return None
# if the connection to Placement is failed. So we raise an exception
# here. The case when Placement successfully return a response, even
# if it is a negative or empty response, the method will return a three
# tuple. That case is handled couple of lines below.
if not res:
raise exception.PlacementAPIConnectFailure()
alloc_reqs, __, __ = res
if not alloc_reqs:
port_ids = [port['id'] for port in ports_to_heal]
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=f'Placement returned no allocation candidate to fulfill '
f'the resource request of the port(s) {port_ids}'
)
if len(alloc_reqs) > 1:
# If there is more than one candidates then it is an ambiguous
# situation that we cannot handle here because selecting the right
# one might need extra information from the compute node. For
# example which PCI PF the VF is allocated from and which RP
# represents that PCI PF in placement.
# TODO(gibi): One way to get that missing information to resolve
# ambiguity would be to load up the InstancePciRequest objects and
# try to use the parent_if_name in their spec to find the proper
# candidate that allocates for the same port from the PF RP that
# has the same name.
port_ids = [port['id'] for port in ports_to_heal]
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=f'Placement returned more than one possible allocation '
f'candidates to fulfill the resource request of the '
f'port(s) {port_ids}. This script does not have enough '
f'information to select the proper candidate to heal the'
f'missing allocations. A possible way to heal the'
f'allocation of this instance is to migrate it to '
f'another compute as the migration process re-creates '
f'the full allocation on the target host.'
)
# so we have one candidate, lets use that to get the needed allocations
# and the provider mapping for the ports' binding profile
alloc_req = alloc_reqs[0]
allocations = alloc_req["allocations"]
provider_mappings = alloc_req["mappings"]
for port in ports_to_heal:
# We also need to record the RPs we are allocated from in the
# port. This will be sent back to Neutron before the allocation
# is updated in placement
profile_allocation = self._get_port_binding_profile_allocation(
ctxt, neutron, port, request_groups_per_port[port['id']],
provider_mappings
)
binding_profile = neutron_api.get_binding_profile(port)
binding_profile[constants.ALLOCATION] = profile_allocation
port[constants.BINDING_PROFILE] = binding_profile
output(_(
"Found a request group : resource provider mapping "
"%(mapping)s for the port %(port_uuid)s with resource request "
"%(request)s attached to the instance %(instance_uuid)s") %
{"mapping": profile_allocation, "port_uuid": port['id'],
"request": port.get(constants.RESOURCE_REQUEST),
"instance_uuid": instance.uuid}
)
return allocations, ports_to_heal
def _update_ports(self, neutron, ports_to_update, output):
succeeded = []
try:
for port in ports_to_update:
profile = neutron_api.get_binding_profile(port)
body = {
'port': {
constants.BINDING_PROFILE: profile
}
}
output(
_('Updating port %(port_uuid)s with attributes '
'%(attributes)s') %
{'port_uuid': port['id'], 'attributes': body['port']})
neutron.update_port(port['id'], body=body)
succeeded.append(port)
except neutron_client_exc.NeutronClientException as e:
output(
_('Updating port %(port_uuid)s failed: %(error)s') %
{'port_uuid': port['id'], 'error': str(e)})
# one of the port updates failed. We need to roll back the updates
# that succeeded before
self._rollback_port_updates(neutron, succeeded, output)
# we failed to heal so we need to stop but we successfully rolled
# back the partial updates so the admin can retry the healing.
raise exception.UnableToUpdatePorts(error=str(e))
@staticmethod
def _rollback_port_updates(neutron, ports_to_rollback, output):
# _update_ports() added the allocation key to these ports, so we need
# to remove them during the rollback.
manual_rollback_needed = []
last_exc = None
for port in ports_to_rollback:
profile = neutron_api.get_binding_profile(port)
profile.pop(constants.ALLOCATION)
body = {
'port': {
constants.BINDING_PROFILE: profile
}
}
try:
output(_('Rolling back port update for %(port_uuid)s') %
{'port_uuid': port['id']})
neutron.update_port(port['id'], body=body)
except neutron_client_exc.NeutronClientException as e:
output(
_('Rolling back update for port %(port_uuid)s failed: '
'%(error)s') % {'port_uuid': port['id'],
'error': str(e)})
# TODO(gibi): We could implement a retry mechanism with
# back off.
manual_rollback_needed.append(port['id'])
last_exc = e
if manual_rollback_needed:
# At least one of the port operation failed so we failed to roll
# back. There are partial updates in neutron. Human intervention
# needed.
raise exception.UnableToRollbackPortUpdates(
error=str(last_exc),
port_uuids=manual_rollback_needed)
def _heal_missing_alloc(self, ctxt, instance, node_cache):
node_uuid = self._get_compute_node_uuid(
ctxt, instance, node_cache)
# Now get the resource allocations for the instance based
# on its embedded flavor.
resources = scheduler_utils.resources_from_flavor(
instance, instance.flavor)
payload = {
'allocations': {
node_uuid: {'resources': resources},
},
'project_id': instance.project_id,
'user_id': instance.user_id,
'consumer_generation': None
}
return payload
def _heal_missing_project_and_user_id(self, allocations, instance):
allocations['project_id'] = instance.project_id
allocations['user_id'] = instance.user_id
return allocations
@staticmethod
def ensure_instance_has_no_vgpu_request(instance):
if instance.flavor.extra_specs.get("resources:VGPU"):
raise exception.HealvGPUAllocationNotSupported(
instance_uuid=instance.uuid)
@staticmethod
def ensure_instance_has_no_cyborg_device_profile_request(instance):
if instance.flavor.extra_specs.get("accel:device_profile"):
raise exception.HealDeviceProfileAllocationNotSupported(
instance_uuid=instance.uuid)
def _heal_allocations_for_instance(self, ctxt, instance, node_cache,
output, placement, dry_run,
heal_port_allocations, neutron,
force):
"""Checks the given instance to see if it needs allocation healing
:param ctxt: cell-targeted nova.context.RequestContext
:param instance: the instance to check for allocation healing
:param node_cache: dict of Instance.node keys to ComputeNode.uuid
values; this cache is updated if a new node is processed.
:param output: function that takes a single message for verbose output
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param dry_run: Process instances and print output but do not commit
any changes.
:param heal_port_allocations: True if healing port allocation is
requested, False otherwise.
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:param force: True if force healing is requested for particular
instance, False otherwise.
:return: True if allocations were created or updated for the instance,
None if nothing needed to be done
:raises: nova.exception.ComputeHostNotFound if a compute node for a
given instance cannot be found
:raises: AllocationCreateFailed if unable to create allocations for
a given instance against a given compute node resource provider
:raises: AllocationUpdateFailed if unable to update allocations for
a given instance with consumer project/user information
:raise UnableToQueryPorts: If the neutron list ports query fails.
:raise PlacementAPIConnectFailure: if placement API cannot be reached
:raise UnableToUpdatePorts: if a port update failed in neutron but any
partial update was rolled back successfully.
:raise UnableToRollbackPortUpdates: if a port update failed in neutron
and the rollback of the partial updates also failed.
"""
if instance.task_state is not None:
output(_('Instance %(instance)s is undergoing a task '
'state transition: %(task_state)s') %
{'instance': instance.uuid,
'task_state': instance.task_state})
return
if instance.node is None:
output(_('Instance %s is not on a host.') % instance.uuid)
return
self.ensure_instance_has_no_vgpu_request(instance)
self.ensure_instance_has_no_cyborg_device_profile_request(instance)
try:
allocations = placement.get_allocs_for_consumer(
ctxt, instance.uuid)
except (ks_exc.ClientException,
exception.ConsumerAllocationRetrievalFailed) as e:
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid,
error=_("Allocation retrieval failed: %s") % e)
need_healing = False
# Placement response can have an empty {'allocations': {}} in it if
# there are no allocations for the instance
if not allocations.get('allocations'):
# This instance doesn't have allocations
need_healing = _CREATE
allocations = self._heal_missing_alloc(ctxt, instance, node_cache)
if (allocations.get('project_id') != instance.project_id or
allocations.get('user_id') != instance.user_id):
# We have an instance with allocations but not the correct
# project_id/user_id, so we want to update the allocations
# and re-put them. We don't use put_allocations here
# because we don't want to mess up shared or nested
# provider allocations.
need_healing = _UPDATE
allocations = self._heal_missing_project_and_user_id(
allocations, instance)
if force:
output(_('Force flag passed for instance %s') % instance.uuid)
need_healing = _UPDATE
# get default allocations
alloc = self._heal_missing_alloc(ctxt, instance, node_cache)
# set consumer generation of existing allocations
alloc["consumer_generation"] = allocations["consumer_generation"]
# set allocations
allocations = alloc
if heal_port_allocations:
to_heal = self._get_port_allocations_to_heal(
ctxt, instance, node_cache, placement, neutron, output)
port_allocations, ports_to_update = to_heal
else:
port_allocations, ports_to_update = {}, []
if port_allocations:
need_healing = need_healing or _UPDATE
# Merge in any missing port allocations
allocations['allocations'] = self._merge_allocations(
allocations['allocations'], port_allocations)
if need_healing:
if dry_run:
# json dump the allocation dict as it contains nested default
# dicts that is pretty hard to read in the verbose output
alloc = jsonutils.dumps(allocations)
if need_healing == _CREATE:
output(_('[dry-run] Create allocations for instance '
'%(instance)s: %(allocations)s') %
{'instance': instance.uuid,
'allocations': alloc})
elif need_healing == _UPDATE:
output(_('[dry-run] Update allocations for instance '
'%(instance)s: %(allocations)s') %
{'instance': instance.uuid,
'allocations': alloc})
else:
# First update ports in neutron. If any of those operations
# fail, then roll back the successful part of it and fail the
# healing. We do this first because rolling back the port
# updates is more straight-forward than rolling back allocation
# changes.
self._update_ports(neutron, ports_to_update, output)
# Now that neutron update succeeded we can try to update
# placement. If it fails we need to rollback every neutron port
# update done before.
resp = placement.put_allocations(ctxt, instance.uuid,
allocations)
if resp:
if need_healing == _CREATE:
output(_('Successfully created allocations for '
'instance %(instance)s.') %
{'instance': instance.uuid})
elif need_healing == _UPDATE:
output(_('Successfully updated allocations for '
'instance %(instance)s.') %
{'instance': instance.uuid})
return True
else:
# Rollback every neutron update. If we succeed to
# roll back then it is safe to stop here and let the admin
# retry. If the rollback fails then
# _rollback_port_updates() will raise another exception
# that instructs the operator how to clean up manually
# before the healing can be retried
self._rollback_port_updates(
neutron, ports_to_update, output)
raise exception.AllocationUpdateFailed(
consumer_uuid=instance.uuid, error='')
else:
output(_('The allocation of instance %s is up-to-date. '
'Nothing to be healed.') % instance.uuid)
return
def _heal_instances_in_cell(self, ctxt, max_count, unlimited, output,
placement, dry_run, instance_uuid,
heal_port_allocations, neutron,
force):
"""Checks for instances to heal in a given cell.
:param ctxt: cell-targeted nova.context.RequestContext
:param max_count: batch size (limit per instance query)
:param unlimited: True if all instances in the cell should be
processed, else False to just process $max_count instances
:param output: function that takes a single message for verbose output
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param dry_run: Process instances and print output but do not commit
any changes.
:param instance_uuid: UUID of a specific instance to process.
:param heal_port_allocations: True if healing port allocation is
requested, False otherwise.
:param neutron: nova.network.neutron.ClientWrapper to
communicate with Neutron
:param force: True if force healing is requested for particular
instance, False otherwise.
:return: Number of instances that had allocations created.
:raises: nova.exception.ComputeHostNotFound if a compute node for a
given instance cannot be found
:raises: AllocationCreateFailed if unable to create allocations for
a given instance against a given compute node resource provider
:raises: AllocationUpdateFailed if unable to update allocations for
a given instance with consumer project/user information
:raise UnableToQueryPorts: If the neutron list ports query fails.
:raise PlacementAPIConnectFailure: if placement API cannot be reached
:raise UnableToUpdatePorts: if a port update failed in neutron but any
partial update was rolled back successfully.
:raise UnableToRollbackPortUpdates: if a port update failed in neutron
and the rollback of the partial updates also failed.
"""
# Keep a cache of instance.node to compute node resource provider UUID.
# This will save some queries for non-ironic instances to the
# compute_nodes table.
node_cache = {}
# Track the total number of instances that have allocations created
# for them in this cell. We return when num_processed equals max_count
# and unlimited=True or we exhaust the number of instances to process
# in this cell.
num_processed = 0
# Get all instances from this cell which have a host and are not
# undergoing a task state transition. Go from oldest to newest.
# NOTE(mriedem): Unfortunately we don't have a marker to use
# between runs where the user is specifying --max-count.
# TODO(mriedem): Store a marker in system_metadata so we can
# automatically pick up where we left off without the user having
# to pass it in (if unlimited is False).
filters = {'deleted': False}
if instance_uuid:
filters['uuid'] = instance_uuid
instances = objects.InstanceList.get_by_filters(
ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
limit=max_count, expected_attrs=['flavor'])
while instances:
output(_('Found %s candidate instances.') % len(instances))
# For each instance in this list, we need to see if it has
# allocations in placement and if so, assume it's correct and
# continue.
for instance in instances:
if self._heal_allocations_for_instance(
ctxt, instance, node_cache, output, placement,
dry_run, heal_port_allocations, neutron, force):
num_processed += 1
# Make sure we don't go over the max count. Note that we
# don't include instances that already have allocations in the
# max_count number, only the number of instances that have
# successfully created allocations.
# If a specific instance was requested we return here as well.
if (not unlimited and num_processed == max_count) or instance_uuid:
return num_processed
# Use a marker to get the next page of instances in this cell.
# Note that InstanceList doesn't support slice notation.
marker = instances[len(instances) - 1].uuid
instances = objects.InstanceList.get_by_filters(
ctxt, filters=filters, sort_key='created_at', sort_dir='asc',
limit=max_count, marker=marker, expected_attrs=['flavor'])
return num_processed
@action_description(
_("Iterates over non-cell0 cells looking for instances which do "
"not have allocations in the Placement service, or have incomplete "
"consumer project_id/user_id values in existing allocations or "
"missing allocations for ports having resource request, and "
"which are not undergoing a task state transition. For each "
"instance found, allocations are created (or updated) against the "
"compute node resource provider for that instance based on the "
"flavor associated with the instance. This command requires that "
"the [api_database]/connection and [placement] configuration "
"options are set."))
@args('--max-count', metavar='<max_count>', dest='max_count',
help='Maximum number of instances to process. If not specified, all '
'instances in each cell will be mapped in batches of 50. '
'If you have a large number of instances, consider specifying '
'a custom value and run the command until it exits with '
'0 or 4.')
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
@args('--dry-run', action='store_true', dest='dry_run', default=False,
help='Runs the command and prints output but does not commit any '
'changes. The return code should be 4.')
@args('--instance', metavar='<instance_uuid>', dest='instance_uuid',
help='UUID of a specific instance to process. If specified '
'--max-count has no effect. '
'The --cell and --instance options are mutually exclusive.')
@args('--skip-port-allocations', action='store_true',
dest='skip_port_allocations', default=False,
help='Skip the healing of the resource allocations of bound ports. '
'E.g. healing bandwidth resource allocation for ports having '
'minimum QoS policy rules attached. If your deployment does '
'not use such a feature then the performance impact of '
'querying neutron ports for each instance can be avoided with '
'this flag.')
@args('--cell', metavar='<cell_uuid>', dest='cell_uuid',
help='Heal allocations within a specific cell. '
'The --cell and --instance options are mutually exclusive.')
@args('--force', action='store_true', dest='force', default=False,
help='Force heal allocations. Requires the --instance argument.')
def heal_allocations(self, max_count=None, verbose=False, dry_run=False,
instance_uuid=None, skip_port_allocations=False,
cell_uuid=None, force=False):
"""Heals instance allocations in the Placement service
Return codes:
* 0: Command completed successfully and allocations were created.
* 1: --max-count was reached and there are more instances to process.
* 2: Unable to find a compute node record for a given instance.
* 3: Unable to create (or update) allocations for an instance against
its compute node resource provider.
* 4: Command completed successfully but no allocations were created.
* 5: Unable to query ports from neutron
* 6: Unable to update ports in neutron
* 7: Cannot roll back neutron port updates. Manual steps needed.
* 8: Cannot heal instance with vGPU or Cyborg resource request
* 127: Invalid input.
"""
# NOTE(mriedem): Thoughts on ways to expand this:
# - allow filtering on enabled/disabled cells
# - add a force option to force allocations for instances which have
# task_state is not None (would get complicated during a migration);
# for example, this could cleanup ironic instances that have
# allocations on VCPU/MEMORY_MB/DISK_GB but are now using a custom
# resource class
# - deal with nested resource providers?
heal_port_allocations = not skip_port_allocations
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
# If user has provided both cell and instance
# Throw an error
if instance_uuid and cell_uuid:
print(_('The --cell and --instance options '
'are mutually exclusive.'))
return 127
if force and not instance_uuid:
print(_('The --instance flag is required '
'when using --force flag.'))
return 127
# TODO(mriedem): Rather than --max-count being both a total and batch
# count, should we have separate options to be specific, i.e. --total
# and --batch-size? Then --batch-size defaults to 50 and --total
# defaults to None to mean unlimited.
if instance_uuid:
max_count = 1
unlimited = False
elif max_count is not None:
try:
max_count = int(max_count)
except ValueError:
max_count = -1
unlimited = False
if max_count < 1:
print(_('Must supply a positive integer for --max-count.'))
return 127
else:
max_count = 50
unlimited = True
output(_('Running batches of %i until complete') % max_count)
ctxt = context.get_admin_context()
# If we are going to process a specific instance, just get the cell
# it is in up front.
if instance_uuid:
try:
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
cells = objects.CellMappingList(objects=[im.cell_mapping])
except exception.InstanceMappingNotFound:
print('Unable to find cell for instance %s, is it mapped? Try '
'running "nova-manage cell_v2 verify_instance" or '
'"nova-manage cell_v2 map_instances".' %
instance_uuid)
return 127
elif cell_uuid:
try:
# validate cell_uuid
cell = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# create CellMappingList
cells = objects.CellMappingList(objects=[cell])
except exception.CellMappingNotFound:
print(_('Cell with uuid %s was not found.') % cell_uuid)
return 127
else:
cells = objects.CellMappingList.get_all(ctxt)
if not cells:
output(_('No cells to process.'))
return 4
placement = report.SchedulerReportClient()
neutron = None
if heal_port_allocations:
neutron = neutron_api.get_client(ctxt, admin=True)
num_processed = 0
# TODO(mriedem): Use context.scatter_gather_skip_cell0.
for cell in cells:
# Skip cell0 since that is where instances go that do not get
# scheduled and hence would not have allocations against a host.
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
output(_('Looking for instances in cell: %s') % cell.identity)
limit_per_cell = max_count
if not unlimited:
# Adjust the limit for the next cell. For example, if the user
# only wants to process a total of 100 instances and we did
# 75 in cell1, then we only need 25 more from cell2 and so on.
limit_per_cell = max_count - num_processed
with context.target_cell(ctxt, cell) as cctxt:
try:
num_processed += self._heal_instances_in_cell(
cctxt, limit_per_cell, unlimited, output, placement,
dry_run, instance_uuid, heal_port_allocations, neutron,
force)
except exception.ComputeHostNotFound as e:
print(e.format_message())
return 2
except (
exception.AllocationCreateFailed,
exception.AllocationUpdateFailed,
exception.PlacementAPIConnectFailure
) as e:
print(e.format_message())
return 3
except exception.UnableToQueryPorts as e:
print(e.format_message())
return 5
except exception.UnableToUpdatePorts as e:
print(e.format_message())
return 6
except exception.UnableToRollbackPortUpdates as e:
print(e.format_message())
return 7
except (
exception.HealvGPUAllocationNotSupported,
exception.HealDeviceProfileAllocationNotSupported,
) as e:
print(e.format_message())
return 8
# Make sure we don't go over the max count. Note that we
# don't include instances that already have allocations in the
# max_count number, only the number of instances that have
# successfully created allocations.
# If a specific instance was provided then we'll just exit
# the loop and process it below (either return 4 or 0).
if num_processed == max_count and not instance_uuid:
output(_('Max count reached. Processed %s instances.')
% num_processed)
return 1
output(_('Processed %s instances.') % num_processed)
if not num_processed:
return 4
return 0
@staticmethod
def _get_rp_uuid_for_host(ctxt, host):
"""Finds the resource provider (compute node) UUID for the given host.
:param ctxt: cell-targeted nova RequestContext
:param host: name of the compute host
:returns: The UUID of the resource provider (compute node) for the host
:raises: nova.exception.HostMappingNotFound if no host_mappings record
is found for the host; indicates
"nova-manage cell_v2 discover_hosts" needs to be run on the cell.
:raises: nova.exception.ComputeHostNotFound if no compute_nodes record
is found in the cell database for the host; indicates the
nova-compute service on that host might need to be restarted.
:raises: nova.exception.TooManyComputesForHost if there are more than
one compute_nodes records in the cell database for the host which
is only possible (under normal circumstances) for ironic hosts but
ironic hosts are not currently supported with host aggregates so
if more than one compute node is found for the host, it is
considered an error which the operator will need to resolve
manually.
"""
# Get the host mapping to determine which cell it's in.
hm = objects.HostMapping.get_by_host(ctxt, host)
# Now get the compute node record for the host from the cell.
with context.target_cell(ctxt, hm.cell_mapping) as cctxt:
# There should really only be one, since only ironic
# hosts can have multiple nodes, and you can't have
# ironic hosts in aggregates for that reason. If we
# find more than one, it's an error.
nodes = objects.ComputeNodeList.get_all_by_host(
cctxt, host)
if len(nodes) > 1:
# This shouldn't happen, so we need to bail since we
# won't know which node to use.
raise exception.TooManyComputesForHost(
num_computes=len(nodes), host=host)
return nodes[0].uuid
@action_description(
_("Mirrors compute host aggregates to resource provider aggregates "
"in the Placement service. Requires the [api_database] and "
"[placement] sections of the nova configuration file to be "
"populated."))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
# TODO(mriedem): Add an option for the 'remove aggregate' behavior.
# We know that we want to mirror hosts aggregate membership to
# placement, but regarding removal, what if the operator or some external
# tool added the resource provider to an aggregate but there is no matching
# host aggregate, e.g. ironic nodes or shared storage provider
# relationships?
# TODO(mriedem): Probably want an option to pass a specific host instead of
# doing all of them.
def sync_aggregates(self, verbose=False):
"""Synchronizes nova host aggregates with resource provider aggregates
Adds nodes to missing provider aggregates in Placement.
NOTE: Depending on the size of your deployment and the number of
compute hosts in aggregates, this command could cause a non-negligible
amount of traffic to the placement service and therefore is
recommended to be run during maintenance windows.
Return codes:
* 0: Successful run
* 1: A host was found with more than one matching compute node record
* 2: An unexpected error occurred while working with the placement API
* 3: Failed updating provider aggregates in placement
* 4: Host mappings not found for one or more host aggregate members
* 5: Compute node records not found for one or more hosts
* 6: Resource provider not found by uuid for a given host
"""
# Start by getting all host aggregates.
ctxt = context.get_admin_context()
aggregate_api = api.AggregateAPI()
placement = aggregate_api.placement_client
aggregates = aggregate_api.get_aggregate_list(ctxt)
# Now we're going to loop over the existing compute hosts in aggregates
# and check to see if their corresponding resource provider, found via
# the host's compute node uuid, are in the same aggregate. If not, we
# add the resource provider to the aggregate in Placement.
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
output(_('Filling in missing placement aggregates'))
# Since hosts can be in more than one aggregate, keep track of the host
# to its corresponding resource provider uuid to avoid redundant
# lookups.
host_to_rp_uuid = {}
unmapped_hosts = set() # keep track of any missing host mappings
computes_not_found = set() # keep track of missing nodes
providers_not_found = {} # map of hostname to missing provider uuid
for aggregate in aggregates:
output(_('Processing aggregate: %s') % aggregate.name)
for host in aggregate.hosts:
output(_('Processing host: %s') % host)
rp_uuid = host_to_rp_uuid.get(host)
if not rp_uuid:
try:
rp_uuid = self._get_rp_uuid_for_host(ctxt, host)
host_to_rp_uuid[host] = rp_uuid
except exception.HostMappingNotFound:
# Don't fail on this now, we can dump it at the end.
unmapped_hosts.add(host)
continue
except exception.ComputeHostNotFound:
# Don't fail on this now, we can dump it at the end.
computes_not_found.add(host)
continue
except exception.TooManyComputesForHost as e:
# TODO(mriedem): Should we treat this like the other
# errors and not fail immediately but dump at the end?
print(e.format_message())
return 1
# We've got our compute node record, so now we can ensure that
# the matching resource provider, found via compute node uuid,
# is in the same aggregate in placement, found via aggregate
# uuid.
try:
placement.aggregate_add_host(ctxt, aggregate.uuid,
rp_uuid=rp_uuid)
output(_('Successfully added host (%(host)s) and '
'provider (%(provider)s) to aggregate '
'(%(aggregate)s).') %
{'host': host, 'provider': rp_uuid,
'aggregate': aggregate.uuid})
except exception.ResourceProviderNotFound:
# The resource provider wasn't found. Store this for later.
providers_not_found[host] = rp_uuid
except exception.ResourceProviderAggregateRetrievalFailed as e:
print(e.message)
return 2
except exception.NovaException as e:
# The exception message is too generic in this case
print(_('Failed updating provider aggregates for '
'host (%(host)s), provider (%(provider)s) '
'and aggregate (%(aggregate)s). Error: '
'%(error)s') %
{'host': host, 'provider': rp_uuid,
'aggregate': aggregate.uuid,
'error': e.message})
return 3
# Now do our error handling. Note that there is no real priority on
# the error code we return. We want to dump all of the issues we hit
# so the operator can fix them before re-running the command, but
# whether we return 4 or 5 or 6 doesn't matter.
return_code = 0
if unmapped_hosts:
print(_('The following hosts were found in nova host aggregates '
'but no host mappings were found in the nova API DB. Run '
'"nova-manage cell_v2 discover_hosts" and then retry. '
'Missing: %s') % ','.join(unmapped_hosts))
return_code = 4
if computes_not_found:
print(_('Unable to find matching compute_nodes record entries in '
'the cell database for the following hosts; does the '
'nova-compute service on each host need to be restarted? '
'Missing: %s') % ','.join(computes_not_found))
return_code = 5
if providers_not_found:
print(_('Unable to find matching resource provider record in '
'placement with uuid for the following hosts: %s. Try '
'restarting the nova-compute service on each host and '
'then retry.') %
','.join('(%s=%s)' % (host, providers_not_found[host])
for host in sorted(providers_not_found.keys())))
return_code = 6
return return_code
def _get_instances_and_current_migrations(self, ctxt, cn_uuid):
if self.cn_uuid_mapping.get(cn_uuid):
cell_uuid, cn_host, cn_node = self.cn_uuid_mapping[cn_uuid]
else:
# We need to find the compute node record from all cells.
results = context.scatter_gather_skip_cell0(
ctxt, objects.ComputeNode.get_by_uuid, cn_uuid)
for result_cell_uuid, result in results.items():
if not context.is_cell_failure_sentinel(result):
cn = result
cell_uuid = result_cell_uuid
break
else:
return False
cn_host, cn_node = (cn.host, cn.hypervisor_hostname)
self.cn_uuid_mapping[cn_uuid] = (cell_uuid, cn_host, cn_node)
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
# Get all the active instances from this compute node
if self.instances_mapping.get(cn_uuid):
inst_uuids = self.instances_mapping[cn_uuid]
else:
# Get the instance list record from the cell.
with context.target_cell(ctxt, cell_mapping) as cctxt:
instances = objects.InstanceList.get_by_host_and_node(
cctxt, cn_host, cn_node, expected_attrs=[])
inst_uuids = [instance.uuid for instance in instances]
self.instances_mapping[cn_uuid] = inst_uuids
# Get all *active* migrations for this compute node
# NOTE(sbauza): Since migrations are transient, it's better to not
# cache the results as they could be stale
with context.target_cell(ctxt, cell_mapping) as cctxt:
migs = objects.MigrationList.get_in_progress_by_host_and_node(
cctxt, cn_host, cn_node)
mig_uuids = [migration.uuid for migration in migs]
return (inst_uuids, mig_uuids)
def _delete_allocations_from_consumer(self, ctxt, placement, provider,
consumer_uuid, consumer_type):
"""Deletes allocations from a resource provider with consumer UUID.
:param ctxt: nova.context.RequestContext
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param provider: Resource Provider to look at.
:param consumer_uuid: the consumer UUID having allocations.
:param consumer_type: the type of consumer,
either 'instance' or 'migration'
:returns: bool whether the allocations were deleted.
"""
# We need to be careful and only remove the allocations
# against this specific RP or we would delete the
# whole instance usage and then it would require some
# healing.
# TODO(sbauza): Remove this extra check once placement
# supports querying allocation delete on both
# consumer and resource provider parameters.
allocations = placement.get_allocs_for_consumer(
ctxt, consumer_uuid)
if len(allocations['allocations']) > 1:
# This consumer has resources spread among multiple RPs (think
# nested or shared for example)
# We then need to just update the usage to remove
# the orphaned resources on the specific RP
del allocations['allocations'][provider['uuid']]
try:
placement.put_allocations(
ctxt, consumer_uuid, allocations)
except exception.AllocationUpdateFailed:
return False
else:
try:
placement.delete_allocation_for_instance(
ctxt, consumer_uuid, consumer_type, force=True)
except exception.AllocationDeleteFailed:
return False
return True
def _check_orphaned_allocations_for_provider(self, ctxt, placement,
output, provider,
delete):
"""Finds orphaned allocations for a specific resource provider.
:param ctxt: nova.context.RequestContext
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param output: function that takes a single message for verbose output
:param provider: Resource Provider to look at.
:param delete: deletes the found orphaned allocations.
:return: a tuple (<number of orphaned allocs>, <number of faults>)
"""
num_processed = 0
faults = 0
# TODO(sbauza): Are we sure we have all Nova RCs ?
# FIXME(sbauza): Possibly use consumer types once Placement API
# supports them.
# NOTE(sbauza): We check allocations having *any* below RC, not having
# *all* of them.
NOVA_RCS = [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB, orc.VGPU,
orc.NET_BW_EGR_KILOBIT_PER_SEC,
orc.NET_BW_IGR_KILOBIT_PER_SEC,
orc.PCPU, orc.MEM_ENCRYPTION_CONTEXT]
# Since the RP can be a child RP, we need to get the root RP as it's
# the compute node UUID
# NOTE(sbauza): In case Placement doesn't support 1.14 microversion,
# that means we don't have nested RPs.
# Since we ask for microversion 1.14, all RPs have a root RP UUID.
cn_uuid = provider.get("root_provider_uuid")
# Now get all the existing instances and active migrations for this
# compute node
result = self._get_instances_and_current_migrations(ctxt, cn_uuid)
if result is False:
# We don't want to hard stop here because the compute service could
# have disappear while we could still have orphaned allocations.
output(_('The compute node for UUID %s can not be '
'found') % cn_uuid)
inst_uuids, mig_uuids = result or ([], [])
try:
pallocs = placement.get_allocations_for_resource_provider(
ctxt, provider['uuid'])
except exception.ResourceProviderAllocationRetrievalFailed:
print(_('Not able to find allocations for resource '
'provider %s.') % provider['uuid'])
raise
# Verify every allocations for each consumer UUID
for consumer_uuid, consumer_resources in pallocs.allocations.items():
consumer_allocs = consumer_resources['resources']
if any(rc in NOVA_RCS
for rc in consumer_allocs):
# We reset the consumer type for each allocation
consumer_type = None
# This is an allocation for Nova resources
# We need to guess whether the instance was deleted
# or if the instance is currently migrating
if not (consumer_uuid in inst_uuids or
consumer_uuid in mig_uuids):
# By default we suspect the orphaned allocation was for a
# migration...
consumer_type = 'migration'
if not(consumer_uuid in inst_uuids):
# ... but if we can't find it either for an instance,
# that means it was for this.
consumer_type = 'instance'
if consumer_type is not None:
output(_('Allocations were set against consumer UUID '
'%(consumer_uuid)s but no existing instances or '
'active migrations are related. ')
% {'consumer_uuid': consumer_uuid})
if delete:
deleted = self._delete_allocations_from_consumer(
ctxt, placement, provider, consumer_uuid,
consumer_type)
if not deleted:
print(_('Not able to delete allocations '
'for consumer UUID %s')
% consumer_uuid)
faults += 1
continue
output(_('Deleted allocations for consumer UUID '
'%(consumer_uuid)s on Resource Provider '
'%(rp)s: %(allocations)s')
% {'consumer_uuid': consumer_uuid,
'rp': provider['uuid'],
'allocations': consumer_allocs})
else:
output(_('Allocations for consumer UUID '
'%(consumer_uuid)s on Resource Provider '
'%(rp)s can be deleted: '
'%(allocations)s')
% {'consumer_uuid': consumer_uuid,
'rp': provider['uuid'],
'allocations': consumer_allocs})
num_processed += 1
return (num_processed, faults)
# TODO(sbauza): Move this to the scheduler report client ?
def _get_resource_provider(self, context, placement, uuid):
"""Returns a single Resource Provider by its UUID.
:param context: The nova.context.RequestContext auth context
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param uuid: A specific Resource Provider UUID
:return: the existing resource provider.
:raises: keystoneauth1.exceptions.base.ClientException on failure to
communicate with the placement API
"""
resource_providers = self._get_resource_providers(context, placement,
uuid=uuid)
if not resource_providers:
# The endpoint never returns a 404, it rather returns an empty list
raise exception.ResourceProviderNotFound(name_or_uuid=uuid)
return resource_providers[0]
def _get_resource_providers(self, context, placement, **kwargs):
"""Returns all resource providers regardless of their relationships.
:param context: The nova.context.RequestContext auth context
:param placement: nova.scheduler.client.report.SchedulerReportClient
to communicate with the Placement service API.
:param kwargs: extra attributes for the query string
:return: list of resource providers.
:raises: keystoneauth1.exceptions.base.ClientException on failure to
communicate with the placement API
"""
url = '/resource_providers'
if 'uuid' in kwargs:
url += '?uuid=%s' % kwargs['uuid']
resp = placement.get(url, global_request_id=context.global_id,
version='1.14')
if resp is None:
raise exception.PlacementAPIConnectFailure()
data = resp.json()
resource_providers = data.get('resource_providers')
return resource_providers
@action_description(
_("Audits orphaned allocations that are no longer corresponding to "
"existing instance resources. This command requires that "
"the [api_database]/connection and [placement] configuration "
"options are set."))
@args('--verbose', action='store_true', dest='verbose', default=False,
help='Provide verbose output during execution.')
@args('--resource_provider', metavar='<provider_uuid>',
dest='provider_uuid',
help='UUID of a specific resource provider to verify.')
@args('--delete', action='store_true', dest='delete', default=False,
help='Deletes orphaned allocations that were found.')
def audit(self, verbose=False, provider_uuid=None, delete=False):
"""Provides information about orphaned allocations that can be removed
Return codes:
* 0: Command completed successfully and no orphaned allocations exist.
* 1: An unexpected error happened during run.
* 3: Orphaned allocations were detected.
* 4: Orphaned allocations were detected and deleted.
* 127: Invalid input.
"""
ctxt = context.get_admin_context()
output = lambda msg: None
if verbose:
output = lambda msg: print(msg)
placement = report.SchedulerReportClient()
# Resets two in-memory dicts for knowing instances per compute node
self.cn_uuid_mapping = collections.defaultdict(tuple)
self.instances_mapping = collections.defaultdict(list)
num_processed = 0
faults = 0
if provider_uuid:
try:
resource_provider = self._get_resource_provider(
ctxt, placement, provider_uuid)
except exception.ResourceProviderNotFound:
print(_('Resource provider with UUID %s does not exist.') %
provider_uuid)
return 127
resource_providers = [resource_provider]
else:
resource_providers = self._get_resource_providers(ctxt, placement)
for provider in resource_providers:
nb_p, faults = self._check_orphaned_allocations_for_provider(
ctxt, placement, output, provider, delete)
num_processed += nb_p
if faults > 0:
print(_('The Resource Provider %s had problems when '
'deleting allocations. Stopping now. Please fix the '
'problem by hand and run again.') %
provider['uuid'])
return 1
if num_processed > 0:
suffix = 's.' if num_processed > 1 else '.'
output(_('Processed %(num)s allocation%(suffix)s')
% {'num': num_processed,
'suffix': suffix})
return 4 if delete else 3
return 0
class LibvirtCommands(object):
"""Commands for managing libvirt instances"""
@action_description(
_("Fetch the stored machine type of the instance from the database."))
@args('instance_uuid', metavar='<instance_uuid>',
help='UUID of instance to fetch the machine type for')
def get_machine_type(self, instance_uuid=None):
"""Fetch the stored machine type of the instance from the database.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Unable to find instance or instance mapping.
* 3: No machine type found for the instance.
"""
try:
ctxt = context.get_admin_context()
mtype = machine_type_utils.get_machine_type(ctxt, instance_uuid)
if mtype:
print(mtype)
return 0
else:
print(_('No machine type registered for instance %s' %
instance_uuid))
return 3
except (exception.InstanceNotFound,
exception.InstanceMappingNotFound) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
@action_description(
_("Set or update the stored machine type of the instance in the "
"database. This is only allowed for instances with a STOPPED, "
"SHELVED or SHELVED_OFFLOADED vm_state."))
@args('instance_uuid', metavar='<instance_uuid>',
help='UUID of instance to update')
@args('machine_type', metavar='<machine_type>',
help='Machine type to set')
@args('--force', action='store_true', default=False, dest='force',
help='Force the update of the stored machine type')
def update_machine_type(
self,
instance_uuid=None,
machine_type=None,
force=False
):
"""Set or update the machine type of a given instance.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Unable to find the instance or instance cell mapping.
* 3: Invalid instance vm_state.
* 4: Unable to move between underlying machine types (pc to q35 etc)
or to older versions.
* 5: Unsupported machine type.
"""
ctxt = context.get_admin_context()
if force:
print(_("Forcing update of machine type."))
try:
rtype, ptype = machine_type_utils.update_machine_type(
ctxt, instance_uuid, machine_type, force=force)
except exception.UnsupportedMachineType as e:
print(str(e))
return 5
except exception.InvalidMachineTypeUpdate as e:
print(str(e))
return 4
except exception.InstanceInvalidState as e:
print(str(e))
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
print(_("Updated instance %(instance_uuid)s machine type to "
"%(machine_type)s (previously %(previous_type)s)") %
{'instance_uuid': instance_uuid,
'machine_type': rtype,
'previous_type': ptype})
return 0
@action_description(
_("List the UUIDs of instances that do not have hw_machine_type set "
"in their image metadata"))
@args('--cell-uuid', metavar='<cell_uuid>', dest='cell_uuid',
required=False, help='UUID of cell from which to list instances')
def list_unset_machine_type(self, cell_uuid=None):
"""List the UUIDs of instances without image_hw_machine_type set
Return codes:
* 0: Command completed successfully, no instances found.
* 1: An unexpected error happened.
* 2: Unable to find cell mapping.
* 3: Instances found without hw_machine_type set.
"""
try:
instance_list = machine_type_utils.get_instances_without_type(
context.get_admin_context(), cell_uuid)
except exception.CellMappingNotFound as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
if instance_list:
print('\n'.join(i.uuid for i in instance_list))
return 3
else:
print(_("No instances found without hw_machine_type set."))
return 0
class VolumeAttachmentCommands(object):
@action_description(_("Show the details of a given volume attachment."))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'volume_id', metavar='<volume_id>',
help='UUID of the volume')
@args(
'--connection_info', action='store_true',
default=False, dest='connection_info', required=False,
help='Only display the connection_info of the volume attachment.')
@args(
'--json', action='store_true',
default=False, dest='json', required=False,
help='Display output as json without a table.')
def show(
self,
instance_uuid=None,
volume_id=None,
connection_info=False,
json=False
):
"""Show attributes of a given volume attachment.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Instance not found.
* 3: Volume is not attached to instance.
"""
try:
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
cctxt, volume_id, instance_uuid)
if connection_info and json:
print(bdm.connection_info)
elif connection_info:
print(format_dict(jsonutils.loads(bdm.connection_info)))
elif json:
print(jsonutils.dumps(bdm))
else:
print(format_dict(bdm))
return 0
except exception.VolumeBDMNotFound as e:
print(str(e))
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
@action_description(_('Show the host connector for this host'))
@args(
'--json', action='store_true',
default=False, dest='json', required=False,
help='Display output as json without a table.')
def get_connector(self, json=False):
"""Show the host connector for this host.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
"""
try:
root_helper = utils.get_root_helper()
host_connector = connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
if json:
print(jsonutils.dumps(host_connector))
else:
print(format_dict(host_connector))
return 0
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
def _refresh(self, instance_uuid, volume_id, connector):
"""Refresh the bdm.connection_info associated with a volume attachment
Unlike the current driver BDM implementation under
nova.virt.block_device.DriverVolumeBlockDevice.refresh_connection_info
that simply GETs an existing volume attachment from cinder this method
cleans up any existing volume connections from the host before creating
a fresh attachment in cinder and populates the underlying BDM with
connection_info from the new attachment.
We can do that here as the command requires that the instance is
stopped, something that isn't always the case with the current driver
BDM approach and thus the two are kept seperate for the time being.
:param instance_uuid: UUID of instance
:param volume_id: ID of volume attached to the instance
:param connector: Connector with which to create the new attachment
"""
volume_api = cinder.API()
compute_rpcapi = rpcapi.ComputeAPI()
compute_api = api.API()
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(cctxt, instance_uuid)
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
cctxt, volume_id, instance_uuid)
if instance.vm_state != obj_fields.InstanceState.STOPPED:
raise exception.InstanceInvalidState(
instance_uuid=instance_uuid, attr='vm_state',
state=instance.vm_state,
method='refresh connection_info (must be stopped)')
if instance.locked:
raise exception.InstanceInvalidState(
instance_uuid=instance_uuid, attr='locked', state='True',
method='refresh connection_info (must be unlocked)')
compute_api.lock(
cctxt, instance,
reason=(
f'Refreshing connection_info for BDM {bdm.uuid} '
f'associated with instance {instance_uuid} and volume '
f'{volume_id}.'))
# NOTE(lyarwood): Yes this is weird but we need to recreate the admin
# context here to ensure the lock above uses a unique request-id
# versus the following refresh and eventual unlock.
ctxt = context.get_admin_context()
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance_action = None
new_attachment_id = None
try:
# Log this as an instance action so operators and users are
# aware that this has happened.
instance_action = objects.InstanceAction.action_start(
cctxt, instance_uuid,
instance_actions.NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT)
# Create a blank attachment to keep the volume reserved
new_attachment_id = volume_api.attachment_create(
cctxt, volume_id, instance_uuid)['id']
# RPC call to the compute to cleanup the connections, which
# will in turn unmap the volume from the compute host
# TODO(lyarwood): Add delete_attachment as a kwarg to
# remove_volume_connection as is available in the private
# method within the manager.
compute_rpcapi.remove_volume_connection(
cctxt, instance, volume_id, instance.host)
# Delete the existing volume attachment if present in the bdm.
# This isn't present when the original attachment was made
# using the legacy cinderv2 APIs before the cinderv3 attachment
# based APIs were present.
if bdm.attachment_id:
volume_api.attachment_delete(cctxt, bdm.attachment_id)
# Update the attachment with host connector, this regenerates
# the connection_info that we can now stash in the bdm.
new_connection_info = volume_api.attachment_update(
cctxt, new_attachment_id, connector,
bdm.device_name)['connection_info']
# Before we save it to the BDM ensure the serial is stashed as
# is done in various other codepaths when attaching volumes.
if 'serial' not in new_connection_info:
new_connection_info['serial'] = bdm.volume_id
# Save the new attachment id and connection_info to the DB
bdm.attachment_id = new_attachment_id
bdm.connection_info = jsonutils.dumps(new_connection_info)
bdm.save()
# Finally mark the attachment as complete, moving the volume
# status from attaching to in-use ahead of the instance
# restarting
volume_api.attachment_complete(cctxt, new_attachment_id)
return 0
finally:
# If the bdm.attachment_id wasn't updated make sure we clean
# up any attachments created during the run.
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
cctxt, volume_id, instance_uuid)
if (
new_attachment_id and
bdm.attachment_id != new_attachment_id
):
volume_api.attachment_delete(cctxt, new_attachment_id)
# If we failed during attachment_update the bdm.attachment_id
# has already been deleted so recreate it now to ensure the
# volume is still associated with the instance and clear the
# now stale connection_info.
try:
volume_api.attachment_get(cctxt, bdm.attachment_id)
except exception.VolumeAttachmentNotFound:
bdm.attachment_id = volume_api.attachment_create(
cctxt, volume_id, instance_uuid)['id']
bdm.connection_info = None
bdm.save()
# Finish the instance action if it was created and started
# TODO(lyarwood): While not really required we should store
# the exec and traceback in here on failure.
if instance_action:
instance_action.finish()
# NOTE(lyarwood): As above we need to unlock the instance with
# a fresh context and request-id to keep it unique. It's safe
# to assume that the instance is locked as this point as the
# earlier call to lock isn't part of this block.
with context.target_cell(
context.get_admin_context(),
im.cell_mapping
) as u_cctxt:
compute_api.unlock(u_cctxt, instance)
@action_description(
_("Refresh the connection info for a given volume attachment"))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'volume_id', metavar='<volume_id>',
help='UUID of the volume')
@args(
'connector_path', metavar='<connector_path>',
help='Path to file containing the host connector in json format.')
def refresh(self, instance_uuid=None, volume_id=None, connector_path=None):
"""Refresh the connection_info associated with a volume attachment
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Connector path does not exist.
* 3: Failed to open connector path.
* 4: Instance does not exist.
* 5: Instance state invalid.
* 6: Volume is not attached to instance.
"""
try:
# TODO(lyarwood): Make this optional and provide a rpcapi capable
# of pulling this down from the target compute during this flow.
if not os.path.exists(connector_path):
raise exception.InvalidInput(
reason=f'Connector file not found at {connector_path}')
# Read in the json connector file
with open(connector_path, 'rb') as connector_file:
connector = jsonutils.load(connector_file)
# Refresh the volume attachment
return self._refresh(instance_uuid, volume_id, connector)
except exception.VolumeBDMNotFound as e:
print(str(e))
return 6
except exception.InstanceInvalidState as e:
print(str(e))
return 5
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 4
except (ValueError, OSError):
print(
f'Failed to open {connector_path}. Does it contain valid '
f'connector_info data?'
)
return 3
except exception.InvalidInput as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
class ImagePropertyCommands():
@action_description(_("Show the value of an instance image property."))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'property', metavar='<image_property>',
help='Image property to show')
def show(self, instance_uuid=None, image_property=None):
"""Show value of a given instance image property.
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Instance not found.
* 3: Image property not found.
"""
try:
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(
cctxt, instance_uuid, expected_attrs=['system_metadata'])
image_property = instance.system_metadata.get(
f'image_{image_property}')
if image_property:
print(image_property)
return 0
else:
print(f'Image property {image_property} not found '
f'for instance {instance_uuid}.')
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print(f'Unexpected error, see nova-manage.log for the full '
f'trace: {str(e)}')
LOG.exception('Unexpected error')
return 1
def _validate_image_properties(self, image_properties):
"""Validate the provided image property names and values
:param image_properties: List of image property names and values
"""
# Sanity check the format of the provided properties, this should be
# in the format of name=value.
if any(x for x in image_properties if '=' not in x):
raise exception.InvalidInput(
"--property should use the format key=value")
# Transform the list of delimited properties to a dict
image_properties = dict(prop.split('=') for prop in image_properties)
# Validate the names of each property by checking against the o.vo
# fields currently listed by ImageProps. We can't use from_dict to
# do this as it silently ignores invalid property keys.
for image_property_name in image_properties.keys():
if image_property_name not in objects.ImageMetaProps.fields:
raise exception.InvalidImagePropertyName(
image_property_name=image_property_name)
# Validate the values by creating an object from the provided dict.
objects.ImageMetaProps.from_dict(image_properties)
# Return the dict so we can update the instance system_metadata
return image_properties
def _update_image_properties(self, instance, image_properties):
"""Update instance image properties
:param instance: The instance to update
:param image_properties: List of image properties and values to update
"""
# Check the state of the instance
allowed_states = [
obj_fields.InstanceState.STOPPED,
obj_fields.InstanceState.SHELVED,
obj_fields.InstanceState.SHELVED_OFFLOADED,
]
if instance.vm_state not in allowed_states:
raise exception.InstanceInvalidState(
instance_uuid=instance.uuid, attr='vm_state',
state=instance.vm_state,
method='image_property set (must be STOPPED, SHELVED, OR '
'SHELVED_OFFLOADED).')
# Validate the property names and values
image_properties = self._validate_image_properties(image_properties)
# Update the image properties and save the instance record
for image_property, value in image_properties.items():
instance.system_metadata[f'image_{image_property}'] = value
# Save and return 0
instance.save()
return 0
@action_description(_(
"Set the values of instance image properties stored in the database. "
"This is only allowed for " "instances with a STOPPED, SHELVED or "
"SHELVED_OFFLOADED vm_state."))
@args(
'instance_uuid', metavar='<instance_uuid>',
help='UUID of the instance')
@args(
'--property', metavar='<image_property>', action='append',
dest='image_properties',
help='Image property to set using the format name=value. For example: '
'--property hw_disk_bus=virtio --property hw_cdrom_bus=sata')
def set(self, instance_uuid=None, image_properties=None):
"""Set instance image property values
Return codes:
* 0: Command completed successfully.
* 1: An unexpected error happened.
* 2: Unable to find instance.
* 3: Instance is in an invalid state.
* 4: Invalid input format.
* 5: Invalid image property name.
* 6: Invalid image property value.
"""
try:
ctxt = context.get_admin_context()
im = objects.InstanceMapping.get_by_instance_uuid(
ctxt, instance_uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(
cctxt, instance_uuid, expected_attrs=['system_metadata'])
return self._update_image_properties(
instance, image_properties)
except ValueError as e:
print(str(e))
return 6
except exception.InvalidImagePropertyName as e:
print(str(e))
return 5
except exception.InvalidInput as e:
print(str(e))
return 4
except exception.InstanceInvalidState as e:
print(str(e))
return 3
except (
exception.InstanceNotFound,
exception.InstanceMappingNotFound,
) as e:
print(str(e))
return 2
except Exception as e:
print('Unexpected error, see nova-manage.log for the full '
'trace: %s ' % str(e))
LOG.exception('Unexpected error')
return 1
CATEGORIES = {
'api_db': ApiDbCommands,
'cell_v2': CellV2Commands,
'db': DbCommands,
'placement': PlacementCommands,
'libvirt': LibvirtCommands,
'volume_attachment': VolumeAttachmentCommands,
'image_property': ImagePropertyCommands,
}
add_command_parsers = functools.partial(cmd_common.add_command_parsers,
categories=CATEGORIES)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
help='Available categories',
handler=add_command_parsers)
post_mortem_opt = cfg.BoolOpt('post-mortem',
default=False,
help='Allow post-mortem debugging')
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opts([category_opt, post_mortem_opt])
config.parse_args(sys.argv)
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
_EXTRA_DEFAULT_LOG_LEVELS)
logging.setup(CONF, "nova")
objects.register_all()
if CONF.category.name == "version":
print(version.version_string_with_package())
return 0
if CONF.category.name == "bash-completion":
cmd_common.print_bash_completion(CATEGORIES)
return 0
try:
fn, fn_args, fn_kwargs = cmd_common.get_action_fn()
ret = fn(*fn_args, **fn_kwargs)
rpc.cleanup()
return ret
except Exception:
if CONF.post_mortem:
import pdb
pdb.post_mortem()
else:
print(_("An error has occurred:\n%s") % traceback.format_exc())
return 255
| [
[
[
879,
890
],
[
64952,
64963
],
[
127032,
127043
],
[
127096,
127107
],
[
65011,
65022
]
],
[
[
898,
907
],
[
153352,
153361
]
],
[
[
915,
917
],
[
145201,
145203
]
],
[
[
925,
927
],
[
3429,
3431
]
],
[
[
935,
938
],
[
12485,
12488
],
[
19404,
19407
],
[
154013,
154016
]
],
[
[
946,
950
],
[
19512,
19516
]
],
[
[
958,
967
],
[
154756,
154765
]
],
[
[
975,
987
],
[
65435,
65437
],
[
65457,
65459
],
[
65470,
65472
],
[
65396,
65398
],
[
65404,
65406
],
[
65417,
65419
],
[
67503,
67505
],
[
67352,
67354
],
[
67365,
67367
],
[
67398,
67400
],
[
67466,
67468
],
[
67479,
67481
]
],
[
[
1007,
1024
],
[
3388,
3396
],
[
3486,
3494
],
[
3630,
3638
],
[
32728,
32736
]
],
[
[
1047,
1072
],
[
12186,
12201
],
[
20545,
20560
]
],
[
[
1099,
1119
],
[
84834,
84840
]
],
[
[
1153,
1185
],
[
63824,
63842
],
[
78392,
78410
],
[
79755,
79773
]
],
[
[
1217,
1226
],
[
136351,
136360
]
],
[
[
1234,
1260
],
[
119238,
119241
],
[
119248,
119251
],
[
119263,
119266
],
[
119276,
119279
],
[
119306,
119309
],
[
119358,
119361
],
[
119410,
119413
],
[
119420,
119423
]
],
[
[
1285,
1288
],
[
153482,
153485
],
[
153716,
153719
]
],
[
[
1309,
1328
],
[
11804,
11810
],
[
21053,
21059
],
[
29475,
29481
],
[
29862,
29868
],
[
31204,
31210
],
[
34079,
34085
]
],
[
[
1350,
1364
],
[
2827,
2834
],
[
154027,
154034
],
[
154076,
154083
],
[
154150,
154157
]
],
[
[
1372,
1399
],
[
26841,
26850
],
[
27038,
27047
]
],
[
[
1431,
1440
],
[
87259,
87268
],
[
135139,
135148
],
[
135231,
135240
],
[
136608,
136617
],
[
141800,
141809
],
[
145493,
145502
]
],
[
[
1464,
1475
],
[
4617,
4628
]
],
[
[
1499,
1508
],
[
40519,
40528
],
[
49148,
49157
]
],
[
[
1516,
1527
],
[
4030,
4041
],
[
24431,
24442
],
[
50208,
50219
],
[
59707,
59718
]
],
[
[
1558,
1573
],
[
32236,
32244
]
],
[
[
1596,
1616
],
[
3277,
3287
],
[
3314,
3324
],
[
153370,
153380
],
[
154373,
154383
],
[
154478,
154488
]
],
[
[
1642,
1645
],
[
108929,
108932
],
[
138034,
138037
]
],
[
[
1671,
1687
],
[
139920,
139936
]
],
[
[
1713,
1719
],
[
137992,
137998
]
],
[
[
1727,
1736
],
[
2806,
2810
]
],
[
[
1754,
1760
],
[
153995,
154001
]
],
[
[
1778,
1785
],
[
7092,
7099
],
[
7411,
7418
],
[
11461,
11468
],
[
14490,
14497
],
[
17520,
17527
],
[
20898,
20905
],
[
21312,
21319
],
[
23235,
23242
],
[
29376,
29383
],
[
29720,
29727
],
[
32835,
32842
],
[
33421,
33428
],
[
37225,
37232
],
[
38799,
38806
],
[
43409,
43416
],
[
43923,
43930
],
[
46914,
46921
],
[
48668,
48675
],
[
50036,
50043
],
[
51920,
51927
],
[
52724,
52731
],
[
53064,
53071
],
[
56899,
56906
],
[
59089,
59096
],
[
60832,
60839
],
[
61642,
61649
],
[
64128,
64135
],
[
65357,
65364
],
[
67270,
67277
],
[
100404,
100411
],
[
102659,
102666
],
[
106281,
106288
],
[
108877,
108884
],
[
114582,
114589
],
[
114765,
114772
],
[
115429,
115436
],
[
115938,
115945
],
[
126747,
126754
],
[
129178,
129185
],
[
131006,
131013
],
[
132981,
132988
],
[
134643,
134650
],
[
134788,
134795
],
[
138060,
138067
],
[
138180,
138187
],
[
139480,
139487
],
[
139521,
139528
],
[
143902,
143909
],
[
143943,
143950
],
[
147165,
147172
],
[
147310,
147317
],
[
151878,
151885
],
[
152023,
152030
]
],
[
[
1806,
1827
],
[
11296,
11304
],
[
11405,
11413
]
],
[
[
1853,
1862
],
[
5886,
5888
],
[
17649,
17651
],
[
21391,
21393
],
[
21544,
21546
]
],
[
[
1883,
1892
],
[
7481,
7490
],
[
8366,
8375
],
[
8481,
8490
],
[
25922,
25931
],
[
26053,
26062
],
[
29804,
29813
]
],
[
[
1910,
1919
],
[
7542,
7551
],
[
39343,
39352
],
[
43561,
43570
],
[
44098,
44107
],
[
44822,
44831
],
[
47111,
47120
],
[
52099,
52108
],
[
57031,
57040
],
[
59306,
59315
],
[
61011,
61020
],
[
61232,
61241
],
[
61875,
61884
],
[
63890,
63899
],
[
74385,
74394
],
[
74565,
74574
],
[
75617,
75626
],
[
78943,
78952
],
[
80439,
80448
],
[
81557,
81566
],
[
81821,
81830
],
[
84874,
84883
],
[
84943,
84952
],
[
89722,
89731
],
[
100782,
100791
],
[
101372,
101381
],
[
103000,
103009
],
[
103156,
103165
],
[
103210,
103219
],
[
103264,
103273
],
[
103423,
103432
],
[
103556,
103565
],
[
103690,
103699
],
[
103854,
103863
],
[
103916,
103925
],
[
106837,
106846
],
[
110406,
110415
],
[
110623,
110632
],
[
110844,
110853
],
[
111875,
111884
],
[
112070,
112079
],
[
112216,
112225
],
[
117849,
117858
],
[
118089,
118098
],
[
120464,
120473
],
[
124443,
124452
],
[
125406,
125415
],
[
127352,
127361
],
[
129529,
129538
],
[
129573,
129582
],
[
131268,
131277
],
[
131369,
131378
],
[
131472,
131481
],
[
131585,
131594
],
[
131625,
131634
],
[
133036,
133045
],
[
135359,
135368
],
[
135469,
135478
],
[
135509,
135518
],
[
138527,
138536
],
[
138797,
138806
],
[
143040,
143049
],
[
145255,
145264
],
[
145655,
145664
],
[
145751,
145760
],
[
145864,
145873
],
[
145904,
145913
],
[
146224,
146233
],
[
147926,
147935
],
[
147966,
147975
],
[
148680,
148689
],
[
149278,
149287
],
[
150166,
150175
],
[
152407,
152416
],
[
152510,
152519
],
[
152601,
152610
],
[
152714,
152723
],
[
152754,
152763
]
],
[
[
1942,
1943
],
[
42496,
42497
],
[
42601,
42602
],
[
45575,
45576
],
[
45687,
45688
],
[
45986,
45987
],
[
47772,
47773
],
[
47983,
47984
],
[
48131,
48132
],
[
48269,
48270
],
[
48379,
48380
],
[
48481,
48482
],
[
49572,
49573
],
[
50918,
50919
],
[
51108,
51109
],
[
54645,
54646
],
[
54754,
54755
],
[
54872,
54873
],
[
55104,
55105
],
[
55302,
55303
],
[
55564,
55565
],
[
58950,
58951
],
[
60081,
60082
],
[
60190,
60191
],
[
94924,
94925
],
[
106994,
106995
],
[
125602,
125603
],
[
128592,
128593
],
[
129906,
129907
],
[
132256,
132257
],
[
133612,
133613
],
[
135843,
135844
],
[
144130,
144131
],
[
146569,
146570
],
[
150847,
150848
],
[
7595,
7596
],
[
7776,
7777
],
[
11205,
11206
],
[
11332,
11333
],
[
11851,
11852
],
[
12286,
12287
],
[
12502,
12503
],
[
15416,
15417
],
[
15484,
15485
],
[
15680,
15681
],
[
15723,
15724
],
[
15858,
15859
],
[
15978,
15979
],
[
20415,
20416
],
[
20645,
20646
],
[
21091,
21092
],
[
21262,
21263
],
[
21514,
21515
],
[
22036,
22037
],
[
22348,
22349
],
[
23501,
23502
],
[
23666,
23667
],
[
24456,
24457
],
[
24509,
24510
],
[
24588,
24589
],
[
25342,
25343
],
[
26373,
26374
],
[
26598,
26599
],
[
27105,
27106
],
[
27445,
27446
],
[
27674,
27675
],
[
28514,
28515
],
[
29518,
29519
],
[
29906,
29907
],
[
31247,
31248
],
[
37065,
37066
],
[
39055,
39056
],
[
39499,
39500
],
[
40173,
40174
],
[
47165,
47166
],
[
50089,
50090
],
[
50100,
50101
],
[
50111,
50112
],
[
50154,
50155
],
[
50180,
50181
],
[
52148,
52149
],
[
52481,
52482
],
[
53277,
53278
],
[
53615,
53616
],
[
53820,
53821
],
[
57080,
57081
],
[
58021,
58022
],
[
58194,
58195
],
[
58523,
58524
],
[
58646,
58647
],
[
58786,
58787
],
[
59359,
59360
],
[
59647,
59648
],
[
59663,
59664
],
[
59679,
59680
],
[
61060,
61061
],
[
61281,
61282
],
[
61425,
61426
],
[
61974,
61975
],
[
77235,
77236
],
[
78111,
78112
],
[
78476,
78477
],
[
79577,
79578
],
[
79847,
79848
],
[
84216,
84217
],
[
84499,
84500
],
[
85044,
85045
],
[
86078,
86079
],
[
87359,
87360
],
[
87649,
87650
],
[
88716,
88717
],
[
88960,
88961
],
[
89852,
89853
],
[
93489,
93490
],
[
99287,
99288
],
[
99456,
99457
],
[
100163,
100164
],
[
100333,
100334
],
[
101425,
101426
],
[
101624,
101625
],
[
102222,
102223
],
[
104589,
104590
],
[
104726,
104727
],
[
109476,
109477
],
[
109996,
109997
],
[
110108,
110109
],
[
111567,
111568
],
[
112344,
112345
],
[
113121,
113122
],
[
113477,
113478
],
[
113838,
113839
],
[
120188,
120189
],
[
120535,
120536
],
[
121807,
121808
],
[
122353,
122354
],
[
122608,
122609
],
[
123034,
123035
],
[
127410,
127411
],
[
127935,
127936
],
[
128291,
128292
],
[
129399,
129400
],
[
131070,
131071
],
[
131949,
131950
],
[
133470,
133471
],
[
154722,
154723
]
],
[
[
1969,
1978
],
[
63707,
63716
],
[
63759,
63768
],
[
64278,
64287
],
[
66221,
66230
],
[
77110,
77119
],
[
77170,
77179
],
[
77573,
77582
],
[
77992,
78001
],
[
79381,
79390
],
[
79470,
79479
]
],
[
[
2004,
2026
],
[
64075,
64086
],
[
64199,
64210
],
[
66062,
66073
],
[
67311,
67322
],
[
68261,
68272
],
[
77044,
77055
],
[
77875,
77886
],
[
79319,
79330
],
[
101808,
101819
]
],
[
[
2044,
2051
],
[
7281,
7288
],
[
7340,
7347
],
[
11751,
11758
],
[
18413,
18420
],
[
18653,
18660
],
[
18900,
18907
],
[
20990,
20997
],
[
26926,
26933
],
[
28101,
28108
],
[
29575,
29582
],
[
29630,
29637
],
[
33078,
33085
],
[
33126,
33133
],
[
33495,
33502
],
[
33783,
33790
],
[
37389,
37396
],
[
37513,
37520
],
[
38506,
38513
],
[
38969,
38976
],
[
39246,
39253
],
[
40380,
40387
],
[
40572,
40579
],
[
40938,
40945
],
[
43472,
43479
],
[
44033,
44040
],
[
44254,
44261
],
[
49197,
49204
],
[
49991,
49998
],
[
52035,
52042
],
[
52313,
52320
],
[
52798,
52805
],
[
52938,
52945
],
[
53142,
53149
],
[
56967,
56974
],
[
59238,
59245
],
[
59464,
59471
],
[
59585,
59592
],
[
60947,
60954
],
[
61173,
61180
],
[
61716,
61723
],
[
61803,
61810
],
[
62902,
62909
],
[
65992,
65999
],
[
66341,
66348
],
[
66547,
66554
],
[
67007,
67014
],
[
93278,
93285
],
[
94670,
94677
],
[
100601,
100608
],
[
100712,
100719
],
[
101199,
101206
],
[
101313,
101320
],
[
101537,
101544
],
[
102146,
102153
],
[
106154,
106161
],
[
106596,
106603
],
[
114639,
114646
],
[
115123,
115130
],
[
115507,
115514
],
[
116007,
116014
],
[
134688,
134695
],
[
134863,
134870
],
[
138101,
138108
],
[
138257,
138264
],
[
138326,
138333
],
[
139821,
139828
],
[
142323,
142330
],
[
147210,
147217
],
[
147390,
147397
],
[
149225,
149232
],
[
149460,
149467
],
[
151923,
151930
],
[
152103,
152110
],
[
154182,
154189
]
],
[
[
2077,
2109
],
[
5950,
5966
]
],
[
[
2135,
2167
],
[
6419,
6435
]
],
[
[
2193,
2213
],
[
138471,
138481
],
[
149950,
149960
],
[
149996,
150006
],
[
150042,
150052
]
],
[
[
2239,
2271
],
[
46972,
46988
]
],
[
[
2297,
2321
],
[
6254,
6266
]
],
[
[
2347,
2387
],
[
6337,
6357
],
[
6574,
6594
]
],
[
[
2413,
2441
],
[
6649,
6663
]
],
[
[
2467,
2487
],
[
5733,
5743
],
[
5808,
5818
]
],
[
[
2513,
2555
],
[
6490,
6511
]
],
[
[
2573,
2576
],
[
154553,
154556
]
],
[
[
2611,
2617
],
[
101697,
101703
],
[
126894,
126900
]
],
[
[
2645,
2669
],
[
73739,
73754
],
[
80840,
80855
]
],
[
[
2687,
2692
],
[
136298,
136303
]
],
[
[
2710,
2717
],
[
154260,
154267
]
],
[
[
2748,
2766
],
[
129226,
129244
],
[
131149,
131167
],
[
132918,
132936
]
],
[
[
2791,
2797
],
[
137954,
137960
]
],
[
[
2799,
2803
],
[
26291,
26295
],
[
26796,
26800
],
[
26875,
26879
],
[
27357,
27361
],
[
27867,
27871
],
[
40731,
40735
],
[
136416,
136420
],
[
136458,
136462
],
[
136554,
136558
],
[
153935,
153939
],
[
154164,
154168
],
[
154213,
154217
],
[
154324,
154328
],
[
154619,
154623
],
[
31885,
31889
]
],
[
[
2821,
2824
],
[
22174,
22177
],
[
129817,
129820
],
[
131879,
131882
],
[
133275,
133278
],
[
135763,
135766
],
[
136887,
136890
],
[
146456,
146459
],
[
148218,
148221
],
[
153008,
153011
]
],
[
[
2920,
2945
],
[
154119,
154144
]
],
[
[
3206,
3213
],
[
85372,
85379
],
[
87323,
87330
],
[
88676,
88683
]
],
[
[
3225,
3232
],
[
85927,
85934
],
[
86161,
86168
],
[
86845,
86852
],
[
87613,
87620
],
[
88920,
88927
]
],
[
[
3270,
3274
],
[
6706,
6710
],
[
6860,
6864
],
[
8511,
8515
],
[
8848,
8852
],
[
9113,
9117
],
[
9251,
9255
],
[
9492,
9496
],
[
9639,
9643
],
[
9766,
9770
],
[
10347,
10351
],
[
19571,
19575
],
[
19883,
19887
],
[
19995,
19999
],
[
20127,
20131
],
[
23052,
23056
],
[
25739,
25743
],
[
28754,
28758
],
[
30269,
30273
],
[
34351,
34355
],
[
34550,
34554
],
[
34914,
34918
],
[
41157,
41161
],
[
41298,
41302
],
[
41370,
41374
],
[
42411,
42415
],
[
42535,
42539
],
[
45353,
45357
],
[
45521,
45525
],
[
45634,
45638
],
[
45885,
45889
],
[
47940,
47944
],
[
48015,
48019
],
[
48181,
48185
],
[
48325,
48329
],
[
48426,
48430
],
[
49518,
49522
],
[
50851,
50855
],
[
51017,
51021
],
[
54554,
54558
],
[
54688,
54692
],
[
54784,
54788
],
[
54994,
54998
],
[
55232,
55236
],
[
55496,
55500
],
[
58874,
58878
],
[
59990,
59994
],
[
60114,
60118
],
[
95627,
95631
],
[
96025,
96029
],
[
96159,
96163
],
[
96367,
96371
],
[
96632,
96636
],
[
97159,
97163
],
[
97355,
97359
],
[
107235,
107239
],
[
125849,
125853
],
[
125983,
125987
],
[
126141,
126145
],
[
128668,
128672
],
[
130104,
130108
],
[
130203,
130207
],
[
130293,
130297
],
[
132369,
132373
],
[
133670,
133674
],
[
133770,
133774
],
[
133860,
133864
],
[
134059,
134063
],
[
135892,
135896
],
[
144199,
144203
],
[
144299,
144303
],
[
144389,
144393
],
[
146626,
146630
],
[
146726,
146730
],
[
151050,
151054
],
[
151150,
151154
]
],
[
[
3293,
3311
],
[
47744,
47762
],
[
94896,
94914
],
[
106966,
106984
],
[
125574,
125592
],
[
128564,
128582
],
[
129878,
129896
],
[
132228,
132246
],
[
133593,
133611
],
[
135824,
135842
],
[
144102,
144120
],
[
146550,
146568
],
[
150828,
150846
]
],
[
[
3350,
3368
],
[
50631,
50649
],
[
50691,
50709
]
],
[
[
3668,
3679
],
[
15589,
15600
],
[
135127,
135138
],
[
135301,
135312
],
[
136681,
136692
]
],
[
[
4675,
4685
],
[
153150,
153160
]
],
[
[
25625,
25638
],
[
153094,
153107
]
],
[
[
26099,
26113
],
[
153124,
153138
]
],
[
[
62181,
62198
],
[
153179,
153196
]
],
[
[
128483,
128498
],
[
153213,
153228
]
],
[
[
133553,
133577
],
[
153255,
153279
]
],
[
[
146519,
146540
],
[
153303,
153324
]
],
[
[
153065,
153075
],
[
153453,
153463
],
[
154406,
154416
]
],
[
[
153330,
153349
],
[
153676,
153695
]
],
[
[
153467,
153479
],
[
153959,
153971
]
],
[
[
153698,
153713
],
[
153973,
153988
]
],
[
[
153860,
153864
]
]
] |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import configparser
import getpass
import itertools
import os
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial
from hashlib import sha1
from typing import Any, ClassVar, Dict, Iterable, List, Mapping, Sequence, Union, cast
import toml
from typing_extensions import Protocol
from pants.base.build_environment import get_buildroot
from pants.option.ranked_value import Value
from pants.util.eval import parse_expression
from pants.util.ordered_set import OrderedSet
# A dict with optional override seed values for buildroot, pants_workdir, and pants_distdir.
SeedValues = Dict[str, Value]
class ConfigSource(Protocol):
"""A protocol that matches pants.engine.fs.FileContent.
Also matches the ad-hoc FileContent-like class we use during options bootstrapping, where we
cannot use pants.engine.fs.FileContent itself due to circular imports.
"""
@property
def path(self) -> str:
raise NotImplementedError()
@property
def content(self) -> bytes:
raise NotImplementedError()
class Config(ABC):
"""Encapsulates config file loading and access, including encapsulation of support for multiple
config files.
Supports variable substitution using old-style Python format strings. E.g., %(var_name)s will be
replaced with the value of var_name.
"""
DEFAULT_SECTION: ClassVar[str] = configparser.DEFAULTSECT
class ConfigError(Exception):
pass
class ConfigValidationError(ConfigError):
pass
@classmethod
def load(
cls,
file_contents: Iterable[ConfigSource],
*,
seed_values: SeedValues | None = None,
) -> Config:
"""Loads config from the given string payloads, with later payloads overriding earlier ones.
A handful of seed values will be set to act as if specified in the loaded config file's
DEFAULT section, and be available for use in substitutions. The caller may override some of
these seed values.
"""
single_file_configs = []
for file_content in file_contents:
content_digest = sha1(file_content.content).hexdigest()
normalized_seed_values = cls._determine_seed_values(seed_values=seed_values)
try:
config_values = cls._parse_toml(
file_content.content.decode(), normalized_seed_values
)
except Exception as e:
raise cls.ConfigError(
f"Config file {file_content.path} could not be parsed as TOML:\n {e}"
)
single_file_configs.append(
_SingleFileConfig(
config_path=file_content.path,
content_digest=content_digest,
values=config_values,
),
)
return _ChainedConfig(tuple(reversed(single_file_configs)))
@classmethod
def _parse_toml(
cls, config_content: str, normalized_seed_values: dict[str, str]
) -> _ConfigValues:
"""Attempt to parse as TOML, raising an exception on failure."""
toml_values = cast(Dict[str, Any], toml.loads(config_content))
toml_values["DEFAULT"] = {
**normalized_seed_values,
**toml_values.get("DEFAULT", {}),
}
return _ConfigValues(toml_values)
@staticmethod
def _determine_seed_values(*, seed_values: SeedValues | None = None) -> dict[str, str]:
"""We pre-populate several default values to allow %([key-name])s interpolation.
This sets up those defaults and checks if the user overrode any of the values.
"""
safe_seed_values = seed_values or {}
buildroot = cast(str, safe_seed_values.get("buildroot", get_buildroot()))
all_seed_values: dict[str, str] = {
"buildroot": buildroot,
"homedir": os.path.expanduser("~"),
"user": getpass.getuser(),
}
def update_seed_values(key: str, *, default_dir: str) -> None:
all_seed_values[key] = cast(
str, safe_seed_values.get(key, os.path.join(buildroot, default_dir))
)
update_seed_values("pants_workdir", default_dir=".pants.d")
update_seed_values("pants_distdir", default_dir="dist")
return all_seed_values
def get(self, section, option, type_=str, default=None):
"""Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as
type.
If the specified section does not exist or is missing a definition for the option, the value
is looked up in the DEFAULT section. If there is still no definition found, the default
value supplied is returned.
"""
if not self.has_option(section, option):
return default
raw_value = self.get_value(section, option)
if issubclass(type_, str):
return raw_value
key = f"{section}.{option}"
return parse_expression(
name=key, val=raw_value, acceptable_types=type_, raise_type=self.ConfigError
)
@abstractmethod
def configs(self) -> Sequence[_SingleFileConfig]:
"""Returns the underlying single-file configs represented by this object."""
@abstractmethod
def sources(self) -> list[str]:
"""Returns the sources of this config as a list of filenames."""
@abstractmethod
def sections(self) -> list[str]:
"""Returns the sections in this config (not including DEFAULT)."""
@abstractmethod
def has_section(self, section: str) -> bool:
"""Returns whether this config has the section."""
@abstractmethod
def has_option(self, section: str, option: str) -> bool:
"""Returns whether this config specified a value for the option."""
@abstractmethod
def get_value(self, section: str, option: str) -> str | None:
"""Returns the value of the option in this config as a string, or None if no value
specified."""
@abstractmethod
def get_source_for_option(self, section: str, option: str) -> str | None:
"""Returns the path to the source file the given option was defined in.
:param section: the scope of the option.
:param option: the name of the option.
:returns: the path to the config file, or None if the option was not defined by a config file.
"""
_TomlPrimitive = Union[bool, int, float, str]
_TomlValue = Union[_TomlPrimitive, List[_TomlPrimitive]]
@dataclass(frozen=True)
class _ConfigValues:
"""The parsed contents of a TOML config file."""
values: dict[str, Any]
@staticmethod
def _is_an_option(option_value: _TomlValue | dict) -> bool:
"""Determine if the value is actually an option belonging to that section.
This handles the special syntax of `my_list_option.add` and `my_list_option.remove`.
"""
if isinstance(option_value, dict):
return "add" in option_value or "remove" in option_value
return True
def _possibly_interpolate_value(
self,
raw_value: str,
*,
option: str,
section: str,
section_values: dict,
) -> str:
"""For any values with %(foo)s, substitute it with the corresponding value from DEFAULT or
the same section."""
def format_str(value: str) -> str:
# Because dictionaries use the symbols `{}`, we must proactively escape the symbols so
# that .format() does not try to improperly interpolate.
escaped_str = value.replace("{", "{{").replace("}", "}}")
new_style_format_str = re.sub(
pattern=r"%\((?P<interpolated>[a-zA-Z_0-9]*)\)s",
repl=r"{\g<interpolated>}",
string=escaped_str,
)
try:
possible_interpolations = {**self.defaults, **section_values}
return new_style_format_str.format(**possible_interpolations)
except KeyError as e:
bad_reference = e.args[0]
raise configparser.InterpolationMissingOptionError(
option,
section,
raw_value,
bad_reference,
)
def recursively_format_str(value: str) -> str:
# It's possible to interpolate with a value that itself has an interpolation. We must
# fully evaluate all expressions for parity with configparser.
match = re.search(r"%\(([a-zA-Z_0-9]*)\)s", value)
if not match:
return value
return recursively_format_str(value=format_str(value))
return recursively_format_str(raw_value)
def _stringify_val(
self,
raw_value: _TomlValue,
*,
option: str,
section: str,
section_values: dict,
interpolate: bool = True,
list_prefix: str | None = None,
) -> str:
"""For parity with configparser, we convert all values back to strings, which allows us to
avoid upstream changes to files like parser.py.
This is clunky. If we drop INI support, we should remove this and use native values
(although we must still support interpolation).
"""
possibly_interpolate = partial(
self._possibly_interpolate_value,
option=option,
section=section,
section_values=section_values,
)
if isinstance(raw_value, str):
return possibly_interpolate(raw_value) if interpolate else raw_value
if isinstance(raw_value, list):
def stringify_list_member(member: _TomlPrimitive) -> str:
if not isinstance(member, str):
return str(member)
interpolated_member = possibly_interpolate(member) if interpolate else member
return f'"{interpolated_member}"'
list_members = ", ".join(stringify_list_member(member) for member in raw_value)
return f"{list_prefix or ''}[{list_members}]"
return str(raw_value)
def _stringify_val_without_interpolation(self, raw_value: _TomlValue) -> str:
return self._stringify_val(
raw_value,
option="",
section="",
section_values={},
interpolate=False,
)
@property
def sections(self) -> list[str]:
return [scope for scope in self.values if scope != "DEFAULT"]
def has_section(self, section: str) -> bool:
return section in self.values
def has_option(self, section: str, option: str) -> bool:
if not self.has_section(section):
return False
return option in self.values[section] or option in self.defaults
def get_value(self, section: str, option: str) -> str | None:
section_values = self.values.get(section)
if section_values is None:
raise configparser.NoSectionError(section)
stringify = partial(
self._stringify_val,
option=option,
section=section,
section_values=section_values,
)
if option not in section_values:
if option in self.defaults:
return stringify(raw_value=self.defaults[option])
raise configparser.NoOptionError(option, section)
option_value = section_values[option]
if not isinstance(option_value, dict):
return stringify(option_value)
# Handle dict options, along with the special `my_list_option.add` and
# `my_list_option.remove` syntax. We only treat `add` and `remove` as the special list
# syntax if the values are lists to reduce the risk of incorrectly special casing.
has_add = isinstance(option_value.get("add"), list)
has_remove = isinstance(option_value.get("remove"), list)
if not has_add and not has_remove:
return stringify(option_value)
add_val = stringify(option_value["add"], list_prefix="+") if has_add else None
remove_val = stringify(option_value["remove"], list_prefix="-") if has_remove else None
if has_add and has_remove:
return f"{add_val},{remove_val}"
if has_add:
return add_val
return remove_val
def options(self, section: str) -> list[str]:
section_values = self.values.get(section)
if section_values is None:
raise configparser.NoSectionError(section)
return [
*section_values.keys(),
*(
default_option
for default_option in self.defaults
if default_option not in section_values
),
]
@property
def defaults(self) -> dict[str, str]:
return {
option: self._stringify_val_without_interpolation(option_val)
for option, option_val in self.values["DEFAULT"].items()
}
@dataclass(frozen=True, eq=False)
class _SingleFileConfig(Config):
"""Config read from a single file."""
config_path: str
content_digest: str
values: _ConfigValues
def configs(self) -> list[_SingleFileConfig]:
return [self]
def sources(self) -> list[str]:
return [self.config_path]
def sections(self) -> list[str]:
return self.values.sections
def has_section(self, section: str) -> bool:
return self.values.has_section(section)
def has_option(self, section: str, option: str) -> bool:
return self.values.has_option(section, option)
def get_value(self, section: str, option: str) -> str | None:
return self.values.get_value(section, option)
def get_source_for_option(self, section: str, option: str) -> str | None:
if self.has_option(section, option):
return self.sources()[0]
return None
def __repr__(self) -> str:
return f"SingleFileConfig({self.config_path})"
def __eq__(self, other: Any) -> bool:
if not isinstance(other, _SingleFileConfig):
return NotImplemented
return self.config_path == other.config_path and self.content_digest == other.content_digest
def __hash__(self) -> int:
return hash(self.content_digest)
@dataclass(frozen=True)
class _ChainedConfig(Config):
"""Config read from multiple sources."""
# Config instances to chain. Later instances take precedence over earlier ones.
chained_configs: tuple[_SingleFileConfig, ...]
@property
def _configs(self) -> tuple[_SingleFileConfig, ...]:
return self.chained_configs
def configs(self) -> tuple[_SingleFileConfig, ...]:
return self.chained_configs
def sources(self) -> list[str]:
# NB: Present the sources in the order we were given them.
return list(itertools.chain.from_iterable(cfg.sources() for cfg in reversed(self._configs)))
def sections(self) -> list[str]:
ret: OrderedSet[str] = OrderedSet()
for cfg in self._configs:
ret.update(cfg.sections())
return list(ret)
def has_section(self, section: str) -> bool:
for cfg in self._configs:
if cfg.has_section(section):
return True
return False
def has_option(self, section: str, option: str) -> bool:
for cfg in self._configs:
if cfg.has_option(section, option):
return True
return False
def get_value(self, section: str, option: str) -> str | None:
for cfg in self._configs:
try:
return cfg.get_value(section, option)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if not self.has_section(section):
raise configparser.NoSectionError(section)
raise configparser.NoOptionError(option, section)
def get_source_for_option(self, section: str, option: str) -> str | None:
for cfg in self._configs:
if cfg.has_option(section, option):
return cfg.get_source_for_option(section, option)
return None
def __repr__(self) -> str:
return f"ChainedConfig({self.sources()})"
@dataclass(frozen=True)
class TomlSerializer:
"""Convert a dictionary of option scopes -> Python values into TOML understood by Pants.
The constructor expects a dictionary of option scopes to their corresponding values as
represented in Python. For example:
{
"GLOBAL": {
"o1": True,
"o2": "hello",
"o3": [0, 1, 2],
},
"some-subsystem": {
"dict_option": {
"a": 0,
"b": 0,
},
},
}
"""
parsed: Mapping[str, dict[str, int | float | str | bool | list | dict]]
def normalize(self) -> dict:
def normalize_section_value(option, option_value) -> tuple[str, Any]:
# With TOML, we store dict values as strings (for now).
if isinstance(option_value, dict):
option_value = str(option_value)
if option.endswith(".add"):
option = option.rsplit(".", 1)[0]
option_value = f"+{option_value!r}"
elif option.endswith(".remove"):
option = option.rsplit(".", 1)[0]
option_value = f"-{option_value!r}"
return option, option_value
return {
section: dict(
normalize_section_value(option, option_value)
for option, option_value in section_values.items()
)
for section, section_values in self.parsed.items()
}
def serialize(self) -> str:
toml_values = self.normalize()
return toml.dumps(toml_values)
| [
[
[
155,
166
]
],
[
[
175,
187
],
[
1581,
1593
],
[
11219,
11231
],
[
11594,
11606
],
[
12744,
12756
],
[
15931,
15943
],
[
15960,
15972
],
[
16070,
16082
],
[
16121,
16133
],
[
8327,
8339
]
],
[
[
195,
202
],
[
4142,
4149
]
],
[
[
210,
219
],
[
15111,
15120
]
],
[
[
227,
229
],
[
4097,
4099
],
[
4331,
4333
]
],
[
[
237,
239
],
[
7888,
7890
],
[
8763,
8765
]
],
[
[
256,
259
],
[
1268,
1271
]
],
[
[
261,
275
],
[
5337,
5351
],
[
5497,
5511
],
[
5627,
5641
],
[
5760,
5774
],
[
5889,
5903
],
[
6047,
6061
],
[
6247,
6261
]
],
[
[
300,
309
],
[
6740,
6749
],
[
13243,
13252
],
[
14552,
14561
],
[
16497,
16506
]
],
[
[
332,
339
],
[
9567,
9574
],
[
11277,
11284
]
],
[
[
360,
364
],
[
2324,
2328
]
],
[
[
384,
387
],
[
6860,
6863
],
[
3361,
3364
],
[
14274,
14277
],
[
17199,
17202
]
],
[
[
389,
397
],
[
1565,
1573
]
],
[
[
399,
403
],
[
802,
806
],
[
3351,
3355
]
],
[
[
405,
413
],
[
1782,
1790
]
],
[
[
415,
419
],
[
6715,
6719
]
],
[
[
421,
428
],
[
17029,
17036
]
],
[
[
430,
438
],
[
5377,
5385
]
],
[
[
440,
445
],
[
6651,
6656
],
[
6693,
6698
]
],
[
[
447,
451
],
[
3346,
3350
],
[
3931,
3935
],
[
4278,
4282
]
],
[
[
460,
464
],
[
3367,
3371
],
[
18046,
18050
]
],
[
[
495,
503
],
[
840,
848
]
],
[
[
546,
559
],
[
3975,
3988
]
],
[
[
598,
603
],
[
812,
817
]
],
[
[
632,
648
],
[
5214,
5230
]
],
[
[
684,
694
],
[
15261,
15271
],
[
15243,
15253
]
],
[
[
789,
799
],
[
1838,
1848
],
[
3632,
3642
]
],
[
[
827,
839
],
[
1791,
1803
]
],
[
[
1261,
1267
],
[
13300,
13306
],
[
14596,
14602
],
[
1873,
1879
]
],
[
[
6634,
6648
],
[
6699,
6713
],
[
6720,
6734
],
[
9939,
9953
]
],
[
[
6680,
6690
],
[
6920,
6930
],
[
9036,
9046
],
[
10439,
10449
]
],
[
[
6769,
6782
],
[
13409,
13422
],
[
3236,
3249
],
[
3539,
3552
]
],
[
[
13282,
13299
],
[
14762,
14779
],
[
2851,
2868
],
[
5386,
5403
],
[
13454,
13471
],
[
14321,
14338
],
[
14833,
14850
],
[
14926,
14943
]
],
[
[
14581,
14595
],
[
3062,
3076
]
],
[
[
16526,
16540
]
]
] |
from django.db import models, connection
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class DataOceanManager(models.Manager):
# exclude soft-deleted objects from queryset
def get_queryset(self):
return super().get_queryset().exclude(deleted_at__isnull=False)
class DataOceanModel(models.Model):
name = "No name field in model!"
created_at = models.DateTimeField(auto_now_add=True,
help_text='When the object was created. In YYYY-MM-DDTHH:mm:ss.SSSSSSZ format.')
updated_at = models.DateTimeField(
auto_now=True, null=True, blank=True,
help_text='When the object was update. In YYYY-MM-DDTHH:mm:ss.SSSSSSZ format.'
)
deleted_at = models.DateTimeField(null=True, blank=True, default=None, editable=False)
objects = DataOceanManager()
include_deleted_objects = models.Manager()
@property
def is_deleted(self):
return bool(self.deleted_at)
def soft_delete(self):
self.deleted_at = timezone.now()
self.save(update_fields=['deleted_at', 'updated_at'])
@classmethod
def truncate(cls):
with connection.cursor() as c:
c.execute('TRUNCATE TABLE "{0}"'.format(cls._meta.db_table))
@classmethod
def truncate_cascade(cls):
with connection.cursor() as c:
c.execute('TRUNCATE TABLE "{0}" CASCADE'.format(cls._meta.db_table))
def __str__(self):
return self.name
class Meta:
abstract = True
ordering = ['id']
class Status(DataOceanModel):
name = models.CharField('name', max_length=100, unique=True)
class Meta:
verbose_name = _('status')
class Authority(DataOceanModel):
name = models.CharField('name', max_length=500, unique=True)
code = models.CharField('number', max_length=10, unique=True, null=True)
class Meta:
verbose_name = _('registration authority')
class TaxpayerType(DataOceanModel):
name = models.CharField('name', max_length=200, unique=True)
class Meta:
verbose_name = _('taxpayer type')
class Register(DataOceanModel):
RELEVANT = 'relevant'
OUTDATED = 'outdated'
NOT_SUPPORTED = 'not supported'
STATUSES = [
(RELEVANT, _('Relevant')),
(OUTDATED, _('Outdated')),
(NOT_SUPPORTED, _('Not supported')),
]
name = models.CharField(_('name'), max_length=500, unique=True)
name_eng = models.CharField('name eng', max_length=500, unique=True, null=True)
source_name = models.CharField(_('source'), max_length=300)
source_register_id = models.CharField(_('source ID'), max_length=36, null=True)
source_url_address = models.URLField(_('source url'), max_length=500)
source_api_address = models.URLField(_('source API'), max_length=500, null=True)
api_list = models.CharField(_('API list'), max_length=30, unique=True, null=True, blank=True)
api_detail = models.CharField(_("API detail"), max_length=30, unique=True, null=True, blank=True)
total_records = models.PositiveIntegerField(_('total records'), default=1, blank=True)
status = models.CharField(_('status'), max_length=15, choices=STATUSES, default=RELEVANT,
blank=True)
class Meta:
ordering = ['id']
verbose_name = _('dataset')
verbose_name_plural = _('datasets')
class Report(DataOceanModel):
registry_name = models.CharField(max_length=20, db_index=True)
download_start = models.DateTimeField(auto_now_add=True)
download_finish = models.DateTimeField(null=True, blank=True)
download_status = models.BooleanField(blank=True, default=False)
download_message = models.CharField(max_length=255, null=True, blank=True)
download_file_name = models.CharField(max_length=255, null=True, blank=True)
download_file_length = models.PositiveIntegerField(blank=True, default=0)
unzip_file_name = models.CharField(max_length=255, null=True, blank=True)
unzip_file_arch_length = models.PositiveIntegerField(blank=True, default=0)
unzip_file_real_length = models.PositiveIntegerField(blank=True, default=0)
unzip_status = models.BooleanField(blank=True, default=False)
unzip_message = models.CharField(max_length=255, null=True, blank=True)
update_start = models.DateTimeField(null=True, blank=True)
update_finish = models.DateTimeField(null=True, blank=True)
update_status = models.BooleanField(blank=True, default=False)
update_message = models.CharField(max_length=300, null=True, blank=True)
records_added = models.IntegerField(blank=True, default=0)
records_changed = models.IntegerField(blank=True, default=0)
records_deleted = models.IntegerField(blank=True, default=0)
invalid_data = models.IntegerField(blank=True, default=0)
@staticmethod
def collect_last_day_reports():
day_ago = timezone.now() - timezone.timedelta(hours=24)
return list(Report.objects.filter(created_at__gt=day_ago))
def __str__(self):
return self.registry_name
class Meta:
ordering = ['id']
verbose_name = _('data update report')
verbose_name_plural = _('data update reports')
| [
[
[
22,
28
],
[
155,
161
],
[
344,
350
],
[
414,
420
],
[
590,
596
],
[
768,
774
],
[
906,
912
],
[
1613,
1619
],
[
1765,
1771
],
[
1830,
1836
],
[
2013,
2019
],
[
2397,
2403
],
[
2469,
2475
],
[
2556,
2562
],
[
2627,
2633
],
[
2711,
2717
],
[
2785,
2791
],
[
2860,
2866
],
[
2960,
2966
],
[
3065,
3071
],
[
3149,
3155
],
[
3447,
3453
],
[
3515,
3521
],
[
3577,
3583
],
[
3643,
3649
],
[
3713,
3719
],
[
3794,
3800
],
[
3877,
3883
],
[
3951,
3957
],
[
4036,
4042
],
[
4116,
4122
],
[
4186,
4192
],
[
4253,
4259
],
[
4329,
4335
],
[
4393,
4399
],
[
4457,
4463
],
[
4525,
4531
],
[
4602,
4608
],
[
4667,
4673
],
[
4732,
4738
],
[
4794,
4800
]
],
[
[
30,
40
],
[
1186,
1196
],
[
1347,
1357
]
],
[
[
66,
74
],
[
1055,
1063
],
[
4910,
4918
],
[
4927,
4935
]
],
[
[
112,
129
],
[
1707,
1708
],
[
1936,
1937
],
[
2107,
2108
],
[
2284,
2285
],
[
2319,
2320
],
[
2359,
2360
],
[
2414,
2415
],
[
2573,
2574
],
[
2644,
2645
],
[
2727,
2728
],
[
2801,
2802
],
[
2877,
2878
],
[
2977,
2978
],
[
3093,
3094
],
[
3166,
3167
],
[
3338,
3339
],
[
3381,
3382
],
[
5147,
5148
],
[
5201,
5202
]
],
[
[
138,
154
],
[
857,
873
]
],
[
[
329,
343
],
[
1585,
1599
],
[
1737,
1751
],
[
1985,
1999
],
[
2143,
2157
],
[
3410,
3424
]
],
[
[
1578,
1584
]
],
[
[
1727,
1736
]
],
[
[
1972,
1984
]
],
[
[
2134,
2142
]
],
[
[
3403,
3409
],
[
4976,
4982
]
]
] |
#!/usr/bin/python3
print("content-type: text/html")
print()
import cgi
import subprocess
F = cgi.FieldStorage()
pod_name = F.getvalue("podname")
img_name = F.getvalue("imgname")
output = subprocess.getoutput("sudo kubectl run {0} --image={1}".format(pod_name,img_name))
print("<pre>"+ output +"</pre") | [
[
[
68,
71
],
[
95,
98
]
],
[
[
79,
89
],
[
189,
199
]
],
[
[
91,
92
],
[
125,
126
],
[
158,
159
]
],
[
[
114,
122
],
[
252,
260
]
],
[
[
147,
155
],
[
261,
269
]
],
[
[
180,
186
],
[
287,
293
]
]
] |
import argparse
import os
import json
from pathlib import Path
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(
prog="exercise 3",
description="preprocess meta data")
parser.add_argument('-f', '--file', type=str, required=True, help='meta data file path')
return parser.parse_args()
if __name__ == "__main__":
# get args
args = parse_args()
filepath = args.file
# read data
data_cats = json.load(open(filepath, 'r'))
# convert json to dataframe
df_cat = pd.DataFrame(data_cats)
df_cat['category'] = df_cat['items'].apply(lambda x: x['snippet']['title'])
df_cat['id'] = df_cat['items'].apply(lambda x: int(x['id']))
df_cat_drop = df_cat.drop(columns=['kind', 'etag', 'items'])
# cache
dir_cache = Path(__file__).parent.absolute()/'tmp'
try:
df_cat_drop.to_csv(os.path.join(dir_cache, 'data_cats.csv'))
except FileNotFoundError:
os.mkdir(dir_cache)
df_cat_drop.to_csv(os.path.join(dir_cache, 'data_cats.csv'))
| [
[
[
7,
15
],
[
115,
123
]
],
[
[
23,
25
],
[
873,
875
],
[
953,
955
],
[
1000,
1002
]
],
[
[
33,
37
],
[
460,
464
]
],
[
[
58,
62
],
[
798,
802
]
],
[
[
70,
82
],
[
536,
538
]
],
[
[
88,
98
],
[
389,
399
]
],
[
[
382,
386
],
[
417,
421
]
],
[
[
406,
414
],
[
475,
483
]
],
[
[
448,
457
],
[
549,
558
]
],
[
[
527,
533
],
[
585,
591
],
[
564,
570
],
[
659,
665
],
[
644,
650
],
[
723,
729
]
],
[
[
709,
720
],
[
854,
865
],
[
981,
992
]
],
[
[
786,
795
],
[
886,
895
],
[
962,
971
],
[
1013,
1022
]
]
] |
'''
This file generates the graph of the Model that we are going to use for the order planner for neural summary generator
The function returns the graph object and some of the important handles of the tensors of the graph in a dictionary.
Note, that all the possible tensor handles can be obtained by the tf.get_tensor_by_name() function. This is done to make
things easy.
'''
import tensorflow as tf
# define the graph builder function:
def get_computation_graph(seed_value, field_vocab_size, content_label_vocab_size, field_embedding_size,
content_label_embedding_size, lstm_cell_state_size, hidden_state_size, rev_content_label_dict):
'''
Function for building the graph for model 1:
The architecture is same as defined in the base paper, except the copynet part
'''
# reset the current graph in the session
tf.reset_default_graph()
graph = tf.Graph() # create a new graph object
# define all the graph computations using the as_default function
print("\n\n=============================================================================================================")
print("Building the graph ... ")
with graph.as_default():
# ========================================================================
# | Step 1:
# ========================================================================
print("\nstep 1: Creating input placeholders for the computations ...")
# Placeholders for the input data:
with tf.variable_scope("Input_Data"):
tf_field_encodings = tf.placeholder(tf.int32, shape=(None, None), name="input_field_encodings")
tf_content_encodings = tf.placeholder(tf.int32, shape=(None, None), name="input_content_encodings")
tf_label_encodings = tf.placeholder(tf.int32, shape=(None, None), name="input_label_encodings")
# This is a placeholder for storing the lengths of the input sequences (they are padded to tensor)
tf_input_seqs_lengths = tf.placeholder(tf.int32, shape=(None,), name="input_sequence_lengths")
# This is a placeholder for storing the lengths of the decoder sequences (they are padded to tensor)
tf_label_seqs_lengths = tf.placeholder(tf.int32, shape=(None,), name="decoder_sequence_lengths")
# create the one-hot encoded values for the label_encodings
with tf.variable_scope("One_hot_encoder"):
tf_one_hot_label_encodings = tf.one_hot(tf_label_encodings, depth=content_label_vocab_size)
# print all placeholders for the encodings generated in step 1
print("\tplaceholder for the field_encodings: ", tf_field_encodings)
print("\tplaceholder for the content_encodings: ", tf_content_encodings)
print("\tplaceholder for the label_encodings: ", tf_label_encodings)
print("\tplaceholder for the input_sequence_lengths: ", tf_input_seqs_lengths)
print("\tplaceholder for the label_sequence_lengths: ", tf_label_seqs_lengths)
# ========================================================================
# | Step 2:
# ========================================================================
print("\nstep 2: Creating Embeddings Mechanism for the input and the output words ...")
# Scope for the shared Content_Label matrix
with tf.variable_scope("Unified_Vocabulary_Matrix"):
content_label_embedding_matrix = tf.get_variable("content_label_embedding_matrix",
shape=(content_label_vocab_size, content_label_embedding_size),
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
dtype=tf.float32)
# Embeddings for the given input data:
with tf.variable_scope("Input_Embedder"):
# Embed the field encodings:
field_embedding_matrix = tf.get_variable("field_embedding_matrix",
shape=(field_vocab_size, field_embedding_size),
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
dtype=tf.float32)
tf_field_embedded = tf.nn.embedding_lookup(field_embedding_matrix, tf_field_encodings, name="field_embedder")
# Embed the content encodings:
tf_content_embedded = tf.nn.embedding_lookup(content_label_embedding_matrix,
tf_content_encodings, name="content_embedder")
print("\tEmbedded_Input_Tensors: ", tf_field_embedded, tf_content_embedded)
# Embeddings for the label (summary sentences):
with tf.variable_scope("Label_Embedder"):
# embed the label encodings
tf_label_embedded = tf.nn.embedding_lookup(content_label_embedding_matrix,
tf_label_encodings, name="label_embedder")
print("\tEmbedded_Label_Tensors: ", tf_label_embedded)
# Concatenate the Input embeddings channel_wise and obtain the combined input tensor
with tf.variable_scope("Input_Concatenator"):
tf_field_content_embedded = tf.concat([tf_field_embedded, tf_content_embedded], axis=-1, name="concatenator")
print("\tFinal_Input_to_the_Encoder: ", tf_field_content_embedded)
# ========================================================================
# | Step 3:
# ========================================================================
print("\nstep 3: Creating the encoder RNN to obtain the encoded input sequences. (The Encoder Module) ... ")
with tf.variable_scope("Encoder"):
encoded_input, encoder_final_state = tf.nn.dynamic_rnn (
cell = tf.nn.rnn_cell.LSTMCell(lstm_cell_state_size), # let all parameters to be default
inputs = tf_field_content_embedded,
sequence_length = tf_input_seqs_lengths,
dtype = tf.float32
)
print("\tEncoded_vectors_bank for attention mechanism: ", encoded_input)
# define the size parameter for the encoded_inputs
encoded_inputs_embeddings_size = encoded_input.shape[-1]
print("\tFinal_state obtained from the last step of encoder: ", encoder_final_state)
# ========================================================================
# | Step 4:
# ========================================================================
print("\nstep 4: defining the Attention Mechanism for the Model (The Dispatcher Module) ...")
print("**step 4.1: defining the content based attention")
with tf.variable_scope("Content_Based_Attention/trainable_weights"):
'''
These weights and bias matrices must be compatible with the dimensions of the h_values and the f_values
passed to the function below. If they are not, some exception might get thrown and it would be difficult
to debug it.
'''
# field weights for the content_based attention
W_f = tf.get_variable("field_attention_weights", shape=(field_embedding_size, content_label_embedding_size),
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))
b_f = tf.get_variable("field_attention_biases", shape=(field_embedding_size, 1),
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))
# hidden states weights for the content_based attention
W_c = tf.get_variable("content_attention_weights",
shape=(encoded_inputs_embeddings_size, content_label_embedding_size),
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))
b_c = tf.get_variable("content_attention_biases", shape=(encoded_inputs_embeddings_size, 1),
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))
# Define the summary_ops for all the weights:
W_f_summary = tf.summary.histogram("Content_based_attention/field_weights", W_f)
b_f_summary = tf.summary.histogram("Content_based_attention/field_biases", b_f)
W_c_summary = tf.summary.histogram("Content_based_attention/content_weights", W_c)
b_c_summary = tf.summary.histogram("Content_based_attention/content_weights", b_c)
with tf.variable_scope("Content_Based_Attention"):
def get_content_based_attention_vectors(query_vectors):
'''
function that returns the alpha_content vector using the yt-1 (query vectors)
'''
# use the W_f and b_f to transform the query_vectors to the shape of f_values
f_trans_query_vectors = tf.matmul(W_f, tf.transpose(query_vectors)) + b_f
# use the W_c and b_c to transform the query_vectors to the shape of h_values
h_trans_query_vectors = tf.matmul(W_c, tf.transpose(query_vectors)) + b_c
# transpose and expand the dims of the f_trans_query_vectors
f_trans_query_matrices = tf.expand_dims(tf.transpose(f_trans_query_vectors), axis=-1)
# obtain the field attention_values by using the matmul operation
field_attention_values = tf.matmul(tf_field_embedded, f_trans_query_matrices)
# perform the same process for the h_trans_query_vectors
h_trans_query_matrices = tf.expand_dims(tf.transpose(h_trans_query_vectors), axis=-1)
hidden_attention_values = tf.matmul(encoded_input, h_trans_query_matrices)
# drop the last dimension (1 sized)
field_attention_values = tf.squeeze(field_attention_values, axis=[-1])
hidden_attention_values = tf.squeeze(hidden_attention_values, axis=[-1])
# free up non_required resources:
ret_value = tf.nn.softmax(field_attention_values * hidden_attention_values, name="softmax")
# return the element wise multiplied values followed by softmax
return ret_value
print("**step 4.2: defining the link based attention")
with tf.variable_scope("Link_Based_Attention/trainable_weights"):
'''
The dimensions of the Link_Matrix must be properly compatible with the field_vocab_size.
If they are not, some exception might get thrown and it would be difficult
to debug it.
'''
Link_Matrix = tf.get_variable("Link_Attention_Matrix", shape=(field_vocab_size, field_vocab_size),
dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0.5, stddev=0.5, seed=seed_value))
# Link_Matrix_summary = tf.summary.histogram("Link_based_attention", Link_Matrix)
print("\tThe Link Matrix used for this attention: ", Link_Matrix)
# define the function for obtaining the link based attention values.
with tf.variable_scope("Link_Based_Attention"):
def get_link_based_attention_vectors(prev_attention_vectors):
'''
This function generates the link based attention vectors using the Link matrix and the
'''
# carve out only the relevant values from the Link matrix
matrix_all_values_from = tf.nn.embedding_lookup(Link_Matrix, tf_field_encodings)
# // TODO: Calculate the matrix_relevant_values from matrix_all_values_from
matrix_relevant_values = tf.map_fn(lambda u: tf.gather(u[0],u[1],axis=1),
[matrix_all_values_from, tf_field_encodings], dtype=matrix_all_values_from.dtype)
return tf.nn.softmax(tf.reduce_sum(tf.expand_dims(prev_attention_vectors, axis = -1) *
matrix_relevant_values, axis=1),name="softmax")
print("**step 4.3: defining the hybrid attention")
# define the hybrid of the content based and the link based attention
with tf.variable_scope("Hybrid_attention/trainable_weights"):
# for now, this is just the content_based attention:
Zt_weights = tf.get_variable("zt_gate_parameter_vector", dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
shape=(hidden_state_size + field_embedding_size + content_label_embedding_size, 1))
Zt_weights_summary = tf.summary.histogram("Hybrid_attention/zt_weights", Zt_weights)
with tf.variable_scope("Hybrid_attention"):
# define the hybrid_attention_calculator function:
def get_hybrid_attention(h_values, y_values, content_attention, link_attention):
'''
function to calcuate the hybrid attention using the content_attention and the link_attention
'''
# calculate the e_f values
e_t = tf.reduce_sum(tf.expand_dims(link_attention, axis=-1) * tf_field_embedded, axis=1)
# create the concatenated vectors from h_values e_t and y_values
input_to_zt_gate = tf.concat([h_values, e_t, y_values], axis=-1) # channel wise concatenation
# perfrom the computations of the z gate:
z_t = tf.nn.sigmoid(tf.matmul(input_to_zt_gate, Zt_weights))
# calculate z_t~ value using the empirical values = 0.2z_t + 0.5
z_t_tilde = (0.2 * z_t) + 0.5
# compute the final hybrid_attention_values using the z_t_tilde values over content and link based values
hybrid_attention = (z_t_tilde * content_attention) + ((1 - z_t_tilde) * link_attention)
# return the calculated hybrid attention:
return hybrid_attention
# ========================================================================
# | Step 5:
# ========================================================================
print("\nstep 5: creating the decoder RNN to obtain the generated summary for the structured data (The Decoder Module) ...")
with tf.variable_scope("Decoder/trainable_weights"):
# define the weights for the output projection calculation
W_output = tf.get_variable(
"output_projector_matrix", dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
shape=(hidden_state_size, content_label_vocab_size))
b_output = tf.get_variable(
"output_projector_biases", dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
shape=(content_label_vocab_size,))
# define the weights and biases for the x_t calculation
W_d = tf.get_variable(
"x_t_gate_matrix", dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
shape=((hidden_state_size + content_label_embedding_size), content_label_embedding_size))
b_d = tf.get_variable(
"x_t_gate_biases", dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),
shape=(content_label_embedding_size,))
# define the summary ops for the defined weights and biases
W_output_summary = tf.summary.histogram("Decoder/W_output", W_output)
b_output_summary = tf.summary.histogram("Decoder/b_output", b_output)
W_d_summary = tf.summary.histogram("Decoder/W_d", W_d)
b_d_summary = tf.summary.histogram("Decoder/b_d", b_d)
# create the LSTM cell to be used for decoding purposes
decoder_cell = tf.nn.rnn_cell.LSTMCell(lstm_cell_state_size)
def decode(start_tokens, mode = "inference", decoder_lengths = None, w_reuse = True):
'''
Function that defines the decoder op and returns the decoded sequence (the summary)
@params:
start_tokens = a tensor containing the start tokens (one for each sequence in the batch)
mode = a value from "training" or "inference" to determine for how long the decoder rnn is to be unrolled.
behaviour is as follows:
"training" => The rnn will be unrolled until the max(decode_lengths). decode_lengths cannot be None.
"inference" => decode_lengths is be ignored and unrolling will be done till <eos> is received
'''
with tf.variable_scope("Decoder", reuse = w_reuse):
# define the function to obtain the predictions out of the given hidden_state_values
def get_predictions(h_t_values):
'''
This function transforms the h_t_values into a one_hot_type probability vector
'''
# apply the output_projection gate to obtain the predictions from the h_t_values
predictions = tf.matmul(h_t_values, W_output) + b_output
# return the predictions:
return predictions
# define a function to obtain the values for the next input to the LSTM_cell (y_t values)
def get_y_t_values(pred_vals):
'''
pred_vals = the tensor of shape [batch_size x content_label_vocab_size]
'''
# calculate the next words to be predicted
act_preds = tf.argmax(pred_vals, axis=-1)
# perform embedding lookup for these act_preds
y_t_values = tf.nn.embedding_lookup(content_label_embedding_matrix, act_preds)
# return the calculated y_t_values
return y_t_values
# write the loop function for the raw_rnn:
def decoder_loop_function(time, cell_output, cell_state, loop_state):
'''
The decoder loop function for the raw_rnn
@params
compatible with -> https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn
'''
if(cell_state is None):
# initial call of the loop function
finished = (time >= tf_label_seqs_lengths)
next_input = start_tokens
next_cell_state = encoder_final_state
emit_output = tf.placeholder(tf.float32, shape=(content_label_vocab_size))
next_loop_state = tf.zeros_like(tf_field_encodings, dtype=tf.float32)
else:
# we define the loop_state as the prev_hybrid attention_vector!
prev_attention_vectors = loop_state # extract the prev_attention_vector from the loop state
# obtain the predictions for the cell_output
preds = get_predictions(cell_output)
# obtain the y_t_values from the cell_output values:
y_t_values = get_y_t_values(preds)
''' Calculate the attention: '''
# calculate the content_based attention values using the defined module
cont_attn = get_content_based_attention_vectors(y_t_values)
# calculate the link based attention values
link_attn = get_link_based_attention_vectors(prev_attention_vectors)
# print "link_attention: ", link_attn
# calculate the hybrid_attention
hybrid_attn = get_hybrid_attention(cell_output, y_t_values, cont_attn, link_attn)
''' Calculate the x_t vector for next_input value'''
# use the hybrid_attn to attend over the encoded_input (to calculate the a_t values)
a_t_values = tf.reduce_sum(tf.expand_dims(hybrid_attn, axis=-1) * encoded_input, axis=1)
# apply the x_t gate
x_t = tf.tanh(tf.matmul(tf.concat([a_t_values, y_t_values], axis=-1), W_d) + b_d)
''' Calculate the finished vector for perfoming computations '''
# define the fninshed parameter for the loop to determine whether to continue or not.
if(mode == "training"):
finished = (time >= decoder_lengths)
elif(mode == "inference"):
temp = tf.argmax(preds, axis=-1) # obtain the output predictions in encoded form
finished = (temp == rev_content_label_dict['<eos>'])
''' Copy mechanism is left (//TODO: change the following and implement copy mechanism)'''
emit_output = preds
# The next_input is the x_t vector so calculated:
next_input = x_t
# The next loop_state is the current content_based attention
next_loop_state = hybrid_attn
# The next_cell_state is going to be equal to the cell_state. (we_don't tweak it)
next_cell_state = cell_state
# In both the cases, the return value is same.
# return all these created parameters
return (finished, next_input, next_cell_state, emit_output, next_loop_state)
# use the tf.nn.raw_rnn to define the decoder computations
outputs, _, _ = tf.nn.raw_rnn(decoder_cell, decoder_loop_function)
# return the outputs obtained from the raw_rnn:
return tf.transpose(outputs.stack(), perm=[1, 0, 2])
# ========================================================================
# | Step 6:
# ========================================================================
print("\nstep 6: defining the training computations ...")
with tf.name_scope("Training_computations"):
outputs = decode(tf_label_embedded[:, 0, :], mode="training",
decoder_lengths=tf_label_seqs_lengths, w_reuse=None)
# print the outputs:
print("\tFinal Output_Tensor obtained from the decoder: ", outputs)
# ========================================================================
# | Step 7:
# ========================================================================
print("\nstep 7: defining the cost function for optimization ...")
# define the loss (objective) function for minimization
with tf.variable_scope("Loss"):
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=tf_one_hot_label_encodings))
# record the loss summary:
loss_summary = tf.summary.scalar("Objective_loss", loss)
# ========================================================================
# | Step 8:
# ========================================================================
print("\nstep 8: defining the computations for the inference mode ...")
# define the computations for the inference mode
with tf.variable_scope("inference_computations"):
inf_outputs = decode(tf_label_embedded[:, 0, :])
print("\tInference outputs: ", inf_outputs)
# ========================================================================
# | Step _:
# ========================================================================
print("\nstep _ : setting up the errands for TensorFlow ...")
with tf.variable_scope("Errands"):
all_summaries = tf.summary.merge_all()
print("=============================================================================================================\n\n")
# Generate the interface dictionary object for this defined graph
interface_dict = {
# Tensors for input placeholders into the graph
"input": {
"field_encodings": tf_field_encodings,
"content_encodings": tf_content_encodings,
"label_encodings": tf_label_encodings,
"input_sequence_lengths": tf_input_seqs_lengths,
"label_sequence_lengths": tf_label_seqs_lengths
},
# Tensors for embedding matrices:
"field_embeddings": field_embedding_matrix,
"content_label_embeddings": content_label_embedding_matrix,
# Tensor for loass
"loss": loss,
# Tensor for the inference output:
"inference": inf_outputs,
# Tensor for training outputs
"training_output": outputs,
# Tensor for init and summary_ops
"summary": all_summaries
}
# return the built graph object and it's interface dictionary:
return graph, interface_dict
| [
[
[
402,
418
],
[
867,
869
],
[
905,
907
],
[
1533,
1535
],
[
1599,
1601
],
[
1614,
1616
],
[
1709,
1711
],
[
1724,
1726
],
[
1819,
1821
],
[
1834,
1836
],
[
2042,
2044
],
[
2057,
2059
],
[
2263,
2265
],
[
2278,
2280
],
[
2419,
2421
],
[
2498,
2500
],
[
3392,
3394
],
[
3485,
3487
],
[
3691,
3693
],
[
3806,
3808
],
[
3879,
3881
],
[
3994,
3996
],
[
4176,
4178
],
[
4291,
4293
],
[
4336,
4338
],
[
4504,
4506
],
[
4819,
4821
],
[
4928,
4930
],
[
5254,
5256
],
[
5335,
5337
],
[
5812,
5814
],
[
5891,
5893
],
[
5954,
5956
],
[
6229,
6231
],
[
6946,
6948
],
[
7390,
7392
],
[
7538,
7540
],
[
7625,
7627
],
[
7745,
7747
],
[
7901,
7903
],
[
8096,
8098
],
[
8183,
8185
],
[
8316,
8318
],
[
8470,
8472
],
[
8563,
8565
],
[
8655,
8657
],
[
8750,
8752
],
[
8834,
8836
],
[
10658,
10660
],
[
11002,
11004
],
[
11113,
11115
],
[
11137,
11139
],
[
11468,
11470
],
[
12564,
12566
],
[
12711,
12713
],
[
12761,
12763
],
[
12826,
12828
],
[
13054,
13056
],
[
13133,
13135
],
[
14747,
14749
],
[
14895,
14897
],
[
14980,
14982
],
[
15039,
15041
],
[
15222,
15224
],
[
15307,
15309
],
[
15366,
15368
],
[
15598,
15600
],
[
15671,
15673
],
[
15726,
15728
],
[
15937,
15939
],
[
16014,
16016
],
[
16073,
16075
],
[
16326,
16328
],
[
16411,
16413
],
[
16491,
16493
],
[
16561,
16563
],
[
16704,
16706
],
[
23188,
23190
],
[
23833,
23835
],
[
23879,
23881
],
[
23911,
23913
],
[
24073,
24075
],
[
24455,
24457
],
[
24887,
24889
],
[
24945,
24947
],
[
9220,
9222
],
[
9235,
9237
],
[
9404,
9406
],
[
9419,
9421
],
[
9573,
9575
],
[
9588,
9590
],
[
9757,
9759
],
[
9925,
9927
],
[
9940,
9942
],
[
10028,
10030
],
[
10171,
10173
],
[
10259,
10261
],
[
10386,
10388
],
[
11847,
11849
],
[
12037,
12039
],
[
12233,
12235
],
[
12247,
12249
],
[
12261,
12263
],
[
13546,
13548
],
[
13560,
13562
],
[
13746,
13748
],
[
13902,
13904
],
[
13916,
13918
],
[
17538,
17540
],
[
22742,
22744
],
[
22873,
22875
],
[
12057,
12059
],
[
18021,
18023
],
[
18545,
18547
],
[
18676,
18678
],
[
19550,
19552
],
[
19565,
19567
],
[
19653,
19655
],
[
19693,
19695
],
[
21054,
21056
],
[
21068,
21070
],
[
21206,
21208
],
[
21214,
21216
],
[
21224,
21226
],
[
21683,
21685
]
],
[
[
461,
482
]
]
] |
import requests
from termcolor import cprint
class trace:
def __init__(self,url):
self.url = url
def checktrace(self):
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/data/mysql_error_trace.inc"
if '://' not in self.url:
self.url = 'http://' + self.url + '/'
url = self.url
vulnurl = url + payload
try:
r = requests.get(url=vulnurl,headers=headers)
if r.status_code == 200 and r"<?php" in r.text():
cprint("mysql trace is vulnable:" + vulnurl,"red")
except:
return False | [
[
[
7,
15
],
[
536,
544
]
],
[
[
38,
44
],
[
656,
662
]
],
[
[
53,
58
]
]
] |
# -*- coding: utf-8 -*-
#
# Modified by Peize Sun, Rufeng Zhang
# Contact: {sunpeize, cxrfzhang}@foxmail.com
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_sparsercnn_config(cfg):
"""
Add config for SparseRCNN.
"""
cfg.MODEL.SparseRCNN = CN()
cfg.MODEL.SparseRCNN.NUM_CLASSES = 80
cfg.MODEL.SparseRCNN.NUM_PROPOSALS = 300
# RCNN Head.
cfg.MODEL.SparseRCNN.NHEADS = 8
cfg.MODEL.SparseRCNN.DROPOUT = 0.0
cfg.MODEL.SparseRCNN.DIM_FEEDFORWARD = 2048
cfg.MODEL.SparseRCNN.ACTIVATION = 'relu'
cfg.MODEL.SparseRCNN.HIDDEN_DIM = 256
cfg.MODEL.SparseRCNN.NUM_CLS = 1
cfg.MODEL.SparseRCNN.NUM_REG = 3
cfg.MODEL.SparseRCNN.NUM_HEADS = 6
# Dynamic Conv.
cfg.MODEL.SparseRCNN.NUM_DYNAMIC = 2
cfg.MODEL.SparseRCNN.DIM_DYNAMIC = 64
# Loss.
cfg.MODEL.SparseRCNN.CLASS_WEIGHT = 2.0
cfg.MODEL.SparseRCNN.GIOU_WEIGHT = 2.0
cfg.MODEL.SparseRCNN.L1_WEIGHT = 5.0 #5.0
cfg.MODEL.SparseRCNN.DEEP_SUPERVISION = True
cfg.MODEL.SparseRCNN.NO_OBJECT_WEIGHT = 0.1
# Focal Loss.
cfg.MODEL.SparseRCNN.USE_FOCAL = True
cfg.MODEL.SparseRCNN.ALPHA = 0.25
cfg.MODEL.SparseRCNN.GAMMA = 2.0
cfg.MODEL.SparseRCNN.PRIOR_PROB = 0.01
# Optimizer.
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 1.0
| [
[
[
212,
225
],
[
334,
336
]
],
[
[
232,
253
]
]
] |
# vim: set encoding=utf-8
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
class SimpleDataSource(object):
annotation = "simple"
def __init__(self, schema=None, rows=None, columns=None):
if not ((rows is None) ^ (columns is None)):
raise ValueError("Either rows or columns must be supplied")
if schema and not isinstance(schema, OrderedDict):
self.schema = OrderedDict(schema)
else:
self.schema = schema
self.rows = rows
self.columns = columns
if columns:
names = self.schema.keys()
if len(names) != len(self.columns):
raise ValueError("number of columns in schema not equals number of columns provided")
for key in self.columns.keys():
if key not in names:
raise ValueError("names in schema do not all match the names in the columns provided")
def to_pandas_dataframe(self):
import numpy as np
from pandas import DataFrame
if self.rows:
a = np.array(self.rows, dtype=_schema_as_numpy_dtype(self.schema))
df = DataFrame(a)
else: # columns
df = DataFrame(self.columns)
return df
def _schema_as_numpy_dtype(schema):
return [(c, _get_numpy_dtype_from_core_type(t)) for c, t in schema.items()]
def _get_numpy_dtype_from_core_type(t):
return object
# if t in [str, unicode, dict, bytearray, list]:
# return object
# return t
| [
[
[
653,
664
],
[
958,
969
],
[
998,
1009
]
],
[
[
672,
688
]
],
[
[
1838,
1860
],
[
1682,
1704
]
],
[
[
1955,
1986
],
[
1886,
1917
]
]
] |
from torchvision import datasets, transforms
from eeg_ml.base import BaseDataLoader
class MnistDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle, validation_split, num_workers, training=True):
trsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.data_dir = data_dir
self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
| [
[
[
24,
32
],
[
486,
494
]
],
[
[
34,
44
],
[
308,
318
],
[
341,
351
],
[
376,
386
]
],
[
[
70,
84
],
[
109,
123
]
],
[
[
93,
108
]
]
] |
import os
from alttprbot.tournament.core import TournamentConfig
from alttprbot_discord.bot import discordbot
from .sglcore import SGLCoreTournamentRace
class TWWR(SGLCoreTournamentRace):
async def configuration(self):
guild = discordbot.get_guild(590331405624410116)
return TournamentConfig(
guild=guild,
racetime_category='twwr',
racetime_goal="Spoiler Log Race",
event_slug="sgl21twwr",
audit_channel=discordbot.get_channel(772351829022474260),
commentary_channel=discordbot.get_channel(631564559018098698),
coop=False,
stream_delay=60,
gsheet_id=os.environ.get("SGL_RESULTS_SHEET")
)
async def create_race_room(self):
self.rtgg_handler = await self.rtgg_bot.startrace(
goal=self.data.racetime_goal,
invitational=False,
unlisted=False,
info_user=self.race_info,
start_delay=15,
time_limit=24,
streaming_required=False,
auto_start=False,
allow_comments=True,
hide_comments=True,
allow_prerace_chat=True,
allow_midrace_chat=False,
allow_non_entrant_chat=False,
chat_message_delay=0,
team_race=False,
)
return self.rtgg_handler
@property
def race_info(self):
return f"SGL 2021 - {self.notes} - {self.versus}"
@property
def notes(self):
if self.friendly_name == '':
return self.episode['match1']['note']
return self.friendly_name
| [
[
[
7,
9
],
[
681,
683
]
],
[
[
49,
65
],
[
298,
314
]
],
[
[
100,
110
],
[
242,
252
],
[
487,
497
],
[
562,
572
]
],
[
[
132,
153
],
[
167,
188
]
],
[
[
162,
166
]
]
] |
class ToolStripRenderEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.ToolStripRenderer.OnRenderImageMargin(System.Windows.Forms.ToolStripRenderEventArgs),System.Windows.Forms.ToolStripRenderer.OnRenderToolStripBorder(System.Windows.Forms.ToolStripRenderEventArgs),and System.Windows.Forms.ToolStripRenderer.OnRenderToolStripBackground(System.Windows.Forms.ToolStripRenderEventArgs) methods.
ToolStripRenderEventArgs(g: Graphics,toolStrip: ToolStrip)
ToolStripRenderEventArgs(g: Graphics,toolStrip: ToolStrip,affectedBounds: Rectangle,backColor: Color)
"""
@staticmethod
def __new__(self,g,toolStrip,affectedBounds=None,backColor=None):
"""
__new__(cls: type,g: Graphics,toolStrip: ToolStrip)
__new__(cls: type,g: Graphics,toolStrip: ToolStrip,affectedBounds: Rectangle,backColor: Color)
"""
pass
AffectedBounds=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Drawing.Rectangle representing the bounds of the area to be painted.
Get: AffectedBounds(self: ToolStripRenderEventArgs) -> Rectangle
"""
BackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Drawing.Color that the background of the System.Windows.Forms.ToolStrip is painted with.
Get: BackColor(self: ToolStripRenderEventArgs) -> Color
"""
ConnectedArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Drawing.Rectangle representing the overlap area between a System.Windows.Forms.ToolStripDropDown and its System.Windows.Forms.ToolStripDropDown.OwnerItem.
Get: ConnectedArea(self: ToolStripRenderEventArgs) -> Rectangle
"""
Graphics=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Drawing.Graphics used to paint.
Get: Graphics(self: ToolStripRenderEventArgs) -> Graphics
"""
ToolStrip=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.Forms.ToolStrip to be painted.
Get: ToolStrip(self: ToolStripRenderEventArgs) -> ToolStrip
"""
| [
[
[
6,
30
]
]
] |
import sys
import os
import ode
import logging
import threading
from time import sleep, time
from genie_python.genie_startup import *
import pv_server
import render
from configurations import config_zoom as config
from collide import collide, CollisionDetector
from geometry import GeometryBox
from move import move_all
sys.path.insert(0, os.path.abspath(os.environ["MYDIRCD"]))
from monitor import Monitor
from server_common.loggers.isis_logger import IsisLogger
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(threadName)-2s) %(message)s',
)
def auto_seek(start_step_size, start_values, end_value, geometries, moves, axis_index, ignore, fine_step=None):
limit = end_value
current_value = start_values[axis_index]
if current_value == end_value:
return end_value
values = start_values[:]
last_value = None
old_points = None
step_checked = False
if current_value < end_value:
# Going up
def comp(a, b):
return a < b
step_size = abs(start_step_size)
else:
# Going down
def comp(a, b):
return a > b
step_size = -abs(start_step_size)
while last_value is None or comp(last_value, end_value):
# Move if we need to
if last_value is not None:
current_value += step_size
# print "Using step size of %f" % step_size
else:
current_value = start_values[axis_index]
if not comp(current_value, end_value):
current_value = end_value
values[axis_index] = current_value
move_all(geometries, moves, values=values[:])
# Check nothing moved too far
if step_checked is False:
new_points = [g.get_vertices() for g in geometries]
if old_points is not None:
delta = max_delta(geometries, new_points, old_points)
if delta > start_step_size:
# Work out a new step size
step_size *= start_step_size/delta
last_value = None
continue
step_checked = True
# Check for collisions
collisions = collide(geometries, ignore)
if any(collisions):
if current_value == start_values[axis_index]:
# There was already a collision
limit = current_value
break
elif fine_step and fine_step < step_size:
start_values[axis_index] = last_value
limit = auto_seek(fine_step, start_values, current_value, geometries, moves, axis_index, ignore)
else:
limit = last_value
break
old_points = new_points[:]
last_value = current_value
# print "Found limits for axis %d using step size of %f" % (axis_index, step_size)
if limit is None:
raise ValueError("Null limit")
return limit
def max_delta(geometries, new_points, old_points):
# Calculate the greatest position deltas
delta = 0
for j in range(len(geometries)):
old = old_points[j]
new = new_points[j]
deltas = [map(float, n - o) for n, o in zip(new, old)]
for i, (x, y, z) in enumerate(deltas):
mag = float(x) ** 2 + float(y) ** 2 + float(z) ** 2
if mag > delta:
delta = mag
# print "New max delta of %f (%f, %f, %f) for body %d at %s from %s" % \
# (mag ** 0.5, x, y, z, j, new[i], old[i])
delta = float(delta) ** 0.5
return delta
def compare(sign):
if sign > 0:
return lambda a, b: a > b
else:
return lambda a, b: a < b
def auto_seek_limits(geometries, ignore, moves, values, limits, coarse=1.0, fine=0.1):
dynamic_limits = []
for i in range(len(values)):
logging.debug("Seeking for axis %d" % i)
lower_limit = auto_seek(coarse, values[:], min(limits[i]), geometries, moves, i, ignore, fine)
upper_limit = auto_seek(coarse, values[:], max(limits[i]), geometries, moves, i, ignore, fine)
dynamic_limits.append([lower_limit, upper_limit])
logging.debug("Found limits for axis %d at %s, %s" % (i, upper_limit, lower_limit))
return dynamic_limits
def look_ahead(start_values, pvs, is_moving, geometries, moves, ignore, max_movement=1.0, max_time=10., time_step=0.1):
# Get the indices of the axes currently moving
moving = [i for i, m in enumerate(is_moving) if m == 0] # DMOV = 0 when motors not moving
msg = "No collisions predicted in the next %fs" % max_time
safe_time = max_time
safe = True
# Only worth calculating if more than one axis is moving
if len(moving) > 1:
set_points = [None] * len(pvs)
speeds = [None] * len(pvs)
directions = [None] * len(pvs)
# Assume everything has finished moving
move_complete = [True] * len(pvs)
# Get some settings:
for i in moving:
pv = pvs[i]
set_point = get_pv(pv + '.DVAL')
speed = get_pv(pv + '.VELO')
direction = 0.
move = set_point - start_values[i]
if move > 0:
direction = 1.
if move < 0:
direction = -1.
set_points[i] = set_point
speeds[i] = speed
directions[i] = direction
# This axis has not finished moving!
move_complete[i] = False
current_time = 0.
values = start_values[:]
old_points = None
step_checked = False
last_time = None
while current_time < max_time:
if last_time is None:
values = start_values[:]
current_time = 0.
old_points = None
else:
current_time += time_step
for i in moving:
if move_complete[i] is False:
values[i] = start_values[i] + (directions[i] * speeds[i] * current_time)
comp = compare(directions[i])(values[i], set_points[i])
if comp:
values[i] = set_points[i]
# Move the bodies
move_all(geometries, moves, values=values)
if step_checked is False:
new_points = [g.get_vertices() for g in geometries]
if old_points is not None:
delta = max_delta(geometries, new_points, old_points)
if delta > max_movement:
# Reduce the size of the time step
time_step *= max_movement/delta
# Reset to starting point
last_time = None
old_points = None
continue
step_checked = True
# Check for collisions
collisions = collide(geometries, ignore)
if any(collisions):
if last_time is None:
msg = "There is already a collision"
safe_time = 0.
else:
msg = "Collision expected in %.1fs - %.1fs" % (last_time, current_time)
safe_time = last_time
safe = False
break
old_points = new_points[:]
last_time = current_time
return msg, safe_time, safe
# Set the high and low dial limits for each motor
def set_limits(limits, pvs):
for limit, pv in zip(limits, pvs):
set_pv(pv + '.DLLM', limit[0])
set_pv(pv + '.DHLM', limit[1])
# Contains operating mode events
class OperatingMode(object):
def __init__(self):
# Close event to be triggered by the render thread
self.close = threading.Event()
# Set dynamic limits automatically
self.set_limits = threading.Event()
# Stop the motors on a collision
self.auto_stop = threading.Event()
# Re-calculate limits on demand
self.calc_limits = threading.Event()
def get_operation_mode(self):
return self.auto_stop.is_set(), self.set_limits.is_set(), self.close.is_set()
def set_operation_mode(self, auto_stop, set_limits, close):
if auto_stop:
self.auto_stop.set()
else:
self.auto_stop.clear()
if set_limits:
self.set_limits.set()
else:
self.set_limits.clear()
if close:
self.close.set()
else:
self.close.clear()
# The main routine to execute
def main():
# Load config:
colors = config.colors
moves = config.moves
ignore = config.ignore
pvs = config.pvs
config_limits = config.hardlimits
old_limits = config_limits[:]
# Create space objects for the live and rendered world
space = ode.Space()
render_space = ode.Space()
collision_space = ode.Space()
# Create and populate lists of geometries
geometries = []
render_geometries = []
collision_geometries = []
for i, geometry in enumerate(config.geometries):
geometries.append(GeometryBox(space, oversize=config.oversize, **geometry))
render_geometries.append(GeometryBox(render_space, **geometry))
collision_geometries.append(GeometryBox(collision_space, oversize=config.oversize, **geometry))
# Create and populate two lists of monitors
monitors = []
is_moving = []
for pv in pvs:
m = Monitor(pv + ".DRBV")
m.start()
monitors.append(m)
any_moving = Monitor(pv + ".DMOV")
any_moving.start()
is_moving.append(any_moving)
# Create a shared operating mode object to control the main thread
op_mode = OperatingMode()
# Set the default behaviour to set_limits as calculated, and auto_stop on collision
op_mode.set_limits.set()
op_mode.auto_stop.set()
# Start a logger
logger = IsisLogger()
# Create a shared render parameter object to update the render thread
parameters = render.RenderParams()
if 'blind' not in sys.argv:
# Initialise the render thread, and set it to daemon - won't prevent the main thread from exiting
renderer = render.Renderer(parameters, render_geometries, colors, monitors, pvs, moves, op_mode)
renderer.daemon = True
# Need to know if this is the first execution of the main loop
op_mode.calc_limits.set()
# Initialise the pv server
# Loop over the pvdb and update the counts based on the number of aves/bodies
for pv in pv_server.pvdb:
for key, val in pv_server.pvdb[pv].items():
if key == 'count':
if val is pv_server.axis_count:
pv_server.pvdb[pv]['count'] = len(config.pvs)
if val is pv_server.body_count:
pv_server.pvdb[pv]['count'] = len(config.geometries)
driver = pv_server.start_thread(config.control_pv, op_mode)
driver.setParam('OVERSIZE', config.oversize)
driver.setParam('COARSE', config.coarse)
driver.setParam('FINE', config.fine)
driver.setParam('NAMES', [g['name'] for g in config.geometries])
# Only report for new collisions
collision_detector = CollisionDetector(driver, collision_geometries, config.moves, monitors, config.ignore,
is_moving, logger, op_mode, config.pvs)
collision_detector.start()
# Main loop
while True:
# Freeze the positions of our current monitors by creating some dummies
# This stops the threads from trying to reading each monitor sequentially, and holding each other up
frozen = [m.value() for m in monitors]
# Execute the move
move_all(geometries, moves, values=frozen)
# Check if the oversize has been changed, ahead of any collision calcs
if driver.new_data.isSet():
for geometry, collision_geometry in zip(geometries, collision_geometries):
geometry.set_size(oversize=driver.getParam('OVERSIZE'))
collision_geometry.set_size(oversize=driver.getParam('OVERSIZE'))
driver.new_data.clear()
op_mode.calc_limits.set()
if driver.getParam("CALC") != 0:
op_mode.calc_limits.set()
collisions = collision_detector.collisions[:]
collision_message = collision_detector.message[:]
# Check if there have been any changes to the .MOVN monitors
fresh = any([m.fresh() for m in is_moving])
# Check if any of the motors monitors are moving
moving = [not m.value() for m in is_moving] # Invert because DMOV is inverted from MOVN
any_moving = any(moving)
new_limits = []
if fresh or any_moving or op_mode.calc_limits.isSet():
# Look ahead some time to see if any collisions are going to happen in the future
msg, safe_time, safe = look_ahead(frozen, config.pvs, moving, geometries, moves, ignore,
max_movement=driver.getParam('COARSE'))
if not safe and not any(collisions):
logger.write_to_log(msg, "MAJOR", "COLLIDE")
driver.setParam('MSG', msg)
else:
driver.setParam('MSG', collision_message)
logging.info(msg)
# Start timing for diagnostics
time_passed = time()
# Seek the correct limit values
dynamic_limits = auto_seek_limits(geometries, ignore, moves, frozen, config_limits,
coarse=driver.getParam('COARSE'), fine=driver.getParam('FINE'))
# Calculate and log the time taken to calculate
time_passed = (time() - time_passed) * 1000
# Log the new limits
logging.info("New limits calculated in %dms, are %s" % (time_passed, dynamic_limits))
# Set the limits according to the set_limits operating mode
if op_mode.set_limits.is_set():
# Apply the calculated limits
new_limits = dynamic_limits[:]
else:
# Restore the configuration limits
new_limits = config_limits[:]
# Update the render thread parameters
parameters.update_params(dynamic_limits, collisions, time_passed)
# # Update the PVs
driver.setParam('TIME', time_passed)
driver.setParam('HI_LIM', [l[1] for l in dynamic_limits])
driver.setParam('LO_LIM', [l[0] for l in dynamic_limits])
driver.setParam('TRAVEL', [min([l[0] - m, l[1] - m], key=abs)
for l, m in zip(dynamic_limits, frozen)])
driver.setParam('TRAV_F', [l[1] - m for l, m in zip(dynamic_limits, frozen)])
driver.setParam('TRAV_R', [l[0] - m for l, m in zip(dynamic_limits, frozen)])
driver.updatePVs()
if 'blind' not in sys.argv:
# On the first run, start the renderer
if renderer.is_alive() is False:
renderer.start()
op_mode.calc_limits.clear()
driver.setParam("CALC", False)
else:
# Restore the configuration limits
if op_mode.set_limits.is_set() is False:
new_limits = config_limits[:]
# Stop us overloading the limits
if not new_limits == old_limits:
threading.Thread(target=set_limits, args=(new_limits, pvs)).start()
old_limits = new_limits[:]
# Exit the program
if op_mode.close.is_set():
# Restore the configuration limits
set_limits(config_limits, pvs)
return
# Give the CPU a break
sleep(0.01)
if 'return' in sys.argv:
return
# Execute main
main()
| [
[
[
7,
10
],
[
322,
325
],
[
10169,
10172
],
[
15080,
15083
],
[
15922,
15925
]
],
[
[
18,
20
],
[
341,
343
],
[
357,
359
]
],
[
[
28,
31
],
[
8931,
8934
],
[
8962,
8965
],
[
8996,
8999
]
],
[
[
39,
46
],
[
469,
476
],
[
495,
502
],
[
3897,
3904
],
[
4213,
4220
],
[
13412,
13419
],
[
13921,
13928
]
],
[
[
54,
63
],
[
7856,
7865
],
[
7944,
7953
],
[
8029,
8038
],
[
8115,
8124
],
[
15570,
15579
]
],
[
[
81,
86
],
[
15886,
15891
]
],
[
[
88,
92
],
[
13500,
13504
],
[
13846,
13850
]
],
[
[
132,
133
],
[
5092,
5098
],
[
5133,
5139
],
[
7618,
7624
],
[
7657,
7663
]
],
[
[
142,
151
],
[
10647,
10656
],
[
10687,
10696
],
[
10772,
10781
],
[
10814,
10823
],
[
10886,
10895
],
[
10928,
10937
],
[
10995,
11004
]
],
[
[
159,
165
],
[
10124,
10130
],
[
10304,
10310
]
],
[
[
193,
214
],
[
8700,
8706
],
[
8726,
8732
],
[
8752,
8758
],
[
8776,
8782
],
[
8807,
8813
],
[
9165,
9171
],
[
9239,
9245
],
[
9415,
9421
],
[
10848,
10854
],
[
10962,
10968
],
[
11018,
11024
],
[
11079,
11085
],
[
11126,
11132
],
[
11169,
11175
],
[
11231,
11237
],
[
11362,
11368
],
[
11386,
11392
],
[
11472,
11478
],
[
13035,
13041
]
],
[
[
235,
242
],
[
2235,
2242
],
[
6982,
6989
]
],
[
[
244,
261
],
[
11314,
11331
]
],
[
[
283,
294
],
[
9211,
9222
],
[
9302,
9313
],
[
9377,
9388
]
],
[
[
312,
320
],
[
1641,
1649
],
[
6286,
6294
],
[
11821,
11829
]
],
[
[
402,
409
],
[
9562,
9569
],
[
9651,
9658
]
],
[
[
456,
466
],
[
10019,
10029
]
],
[
[
610,
619
],
[
2590,
2599
],
[
3961,
3970
],
[
4064,
4073
]
],
[
[
2995,
3004
],
[
1887,
1896
],
[
6507,
6516
]
],
[
[
3633,
3640
],
[
6115,
6122
]
],
[
[
3749,
3765
],
[
13581,
13597
]
],
[
[
4330,
4340
],
[
13016,
13026
]
],
[
[
7546,
7556
],
[
15594,
15604
],
[
15796,
15806
]
],
[
[
7729,
7742
],
[
9823,
9836
]
],
[
[
8660,
8664
],
[
15968,
15972
]
]
] |
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import RandomUniform, Initializer, Constant
import numpy as np
class InitCentersRandom(Initializer):
""" Initializer for initialization of centers of RBF network
as random samples from the given data set.
# Arguments
X: matrix, dataset to choose the centers from (random rows
are taken as centers)
"""
def __init__(self, X):
self.X = X
def __call__(self, shape, dtype=None):
assert shape[1] == self.X.shape[1]
idx = tf.constant( np.random.randint(self.X.shape[0], size=shape[0]) )
return self.X[idx, :]
class RBFLayer(Layer):
""" Layer of Gaussian RBF units.
# Example
```python
model = Sequential()
model.add(RBFLayer(10,
initializer=InitCentersRandom(X),
betas=1.0,
input_shape=(1,)))
model.add(Dense(1))
```
# Arguments
output_dim: number of hidden units (i.e. number of outputs of the
layer)
initializer: instance of initiliazer to initialize centers
betas: float, initial value for betas
"""
def __init__(self, output_dim, initializer=None, betas=1.0, **kwargs):
self.output_dim = output_dim
self.init_betas = betas
if not initializer:
self.initializer = RandomUniform(0.0, 1.0)
else:
self.initializer = initializer
super(RBFLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.centers = self.add_weight(name='centers',
shape=(self.output_dim, input_shape[1]),
initializer=self.initializer,
trainable=True)
self.betas = self.add_weight(name='betas',
shape=(self.output_dim,),
initializer=Constant(
value=self.init_betas),
# initializer='ones',
trainable=True)
super(RBFLayer, self).build(input_shape)
def call(self, x):
C = K.expand_dims(self.centers)
H = K.transpose(C-K.transpose(x))
return K.exp(-self.betas * K.sum(H**2, axis=1))
# C = self.centers[np.newaxis, :, :]
# X = x[:, np.newaxis, :]
# diffnorm = K.sum((C-X)**2, axis=-1)
# ret = K.exp( - self.betas * diffnorm)
# return ret
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
def get_config(self):
# have to define get_config to be able to use model_from_json
config = {
'output_dim': self.output_dim
}
base_config = super(RBFLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
[
[
7,
23
],
[
633,
635
]
],
[
[
53,
65
],
[
2368,
2369
],
[
2408,
2409
],
[
2422,
2423
],
[
2453,
2454
],
[
2473,
2474
]
],
[
[
102,
107
],
[
745,
750
]
],
[
[
150,
163
],
[
1506,
1519
]
],
[
[
165,
176
],
[
231,
242
]
],
[
[
178,
186
],
[
2094,
2102
]
],
[
[
194,
205
],
[
646,
648
]
],
[
[
213,
230
]
],
[
[
736,
744
],
[
1601,
1609
],
[
2296,
2304
],
[
2985,
2993
]
]
] |
import argparse
import glob
import os
import os.path as osp
import sys
import warnings
from multiprocessing import Pool
import mmcv
import numpy as np
# custom import
import pandas as pd
import pdb
def extract_frame(vid_item):
"""Generate optical flow using dense flow.
Args:
vid_item (list): Video item containing video full path,
video (short) path, video id.
Returns:
bool: Whether generate optical flow successfully.
"""
full_path, vid_path, vid_id, method, task = vid_item
if '/' in vid_path:
act_name = osp.basename(osp.dirname(vid_path))
out_full_path = osp.join(args.out_dir, act_name)
else:
out_full_path = args.out_dir
if task == 'rgb':
if args.use_opencv:
# Not like using denseflow,
# Use OpenCV will not make a sub directory with the video name
video_name = osp.splitext(osp.basename(vid_path))[0]
out_full_path = osp.join(out_full_path, video_name)
vr = mmcv.VideoReader(full_path)
for i in range(len(vr)):
if vr[i] is not None:
w, h, c = np.shape(vr[i])
if args.new_short == 0:
out_img = mmcv.imresize(vr[i], (args.new_width,
args.new_height))
else:
if min(h, w) == h:
new_h = args.new_short
new_w = int((new_h / h) * w)
else:
new_w = args.new_short
new_h = int((new_w / w) * h)
out_img = mmcv.imresize(vr[i], (new_h, new_w))
mmcv.imwrite(out_img,
f'{out_full_path}/img_{i + 1:05d}.jpg')
else:
warnings.warn(
'Length inconsistent!'
f'Early stop with {i + 1} out of {len(vr)} frames.')
break
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
os.system(cmd)
elif task == 'flow':
if args.input_frames:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} --nh={args.new_height} -v --if')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v --if')
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} --nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
os.system(cmd)
else:
if args.new_short == 0:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
os.system(cmd_rgb)
os.system(cmd_flow)
print(f'{task} {vid_id} {vid_path} {method} done')
sys.stdout.flush()
return True
def parse_args():
parser = argparse.ArgumentParser(description='extract optical flows')
parser.add_argument('src_dir', type=str, help='source video directory')
parser.add_argument('out_dir', type=str, help='output rawframe directory')
parser.add_argument(
'--task',
type=str,
default='flow',
choices=['rgb', 'flow', 'both'],
help='which type of frames to be extracted')
parser.add_argument(
'--level',
type=int,
choices=[1, 2],
default=2,
help='directory level of data')
parser.add_argument(
'--num-worker',
type=int,
default=8,
help='number of workers to build rawframes')
parser.add_argument(
'--flow-type',
type=str,
default=None,
choices=[None, 'tvl1', 'warp_tvl1', 'farn', 'brox'],
help='flow type to be generated')
parser.add_argument(
'--out-format',
type=str,
default='jpg',
choices=['jpg', 'h5', 'png'],
help='output format')
parser.add_argument(
'--ext',
type=str,
default='avi',
choices=['avi', 'mp4', 'webm'],
help='video file extensions')
parser.add_argument(
'--new-width', type=int, default=0, help='resize image width')
parser.add_argument(
'--new-height', type=int, default=0, help='resize image height')
parser.add_argument(
'--new-short',
type=int,
default=0,
help='resize image short side length keeping ratio')
parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU')
parser.add_argument(
'--resume',
action='store_true',
default=False,
help='resume optical flow extraction instead of overwriting')
parser.add_argument(
'--use-opencv',
action='store_true',
help='Whether to use opencv to extract rgb frames')
parser.add_argument(
'--input-frames',
action='store_true',
help='Whether to extract flow frames based on rgb frames')
parser.add_argument(
'--ref_listfile_path', type=str, default='', help='reference listfile path for the subset')
args = parser.parse_args()
return args
def get_subset_classes(ref_listfile_path):
df = pd.read_csv(ref_listfile_path, header=None, sep='*')
cur_data = df.values
subset_classes = []
for i,row in enumerate(cur_data):
cur_cls = row[0].split('/')[1]
cur_cls = cur_cls.replace(' ', '_').replace('(', '-').replace(')', '-')
if cur_cls not in subset_classes:
subset_classes.append(cur_cls)
return subset_classes
def filter_vid_list(vid_list, src_dir, ref_listfile_path):
subset_classes = get_subset_classes(ref_listfile_path)
filtered_vid_list = []
filtered_full_path_list = []
for vid,fpath in zip(vid_list,fullpath_list):
cur_cls = vid.split('/')[0]
if cur_cls in subset_classes:
filtered_vid_list.append(vid)
filtered_full_path_list.append(os.path.join(src_dir, vid))
return filtered_vid_list, filtered_full_path_list
if __name__ == '__main__':
args = parse_args()
if not osp.isdir(args.out_dir):
print(f'Creating folder: {args.out_dir}')
os.makedirs(args.out_dir)
if args.level == 2:
if args.ref_listfile_path != '':
classes = get_subset_classes(args.ref_listfile_path)
else:
classes = os.listdir(args.src_dir)
for classname in classes:
new_dir = osp.join(args.out_dir, classname)
if not osp.isdir(new_dir):
print(f'Creating folder: {new_dir}')
os.makedirs(new_dir)
if args.input_frames:
print('Reading rgb frames from folder: ', args.src_dir)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
print('Total number of rgb frame folders found: ', len(fullpath_list))
else:
print('Reading videos from folder: ', args.src_dir)
print('Extension of videos: ', args.ext)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' +
args.ext)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
print('Total number of videos found: ', len(fullpath_list))
if args.resume:
fullpath_list = set(fullpath_list).difference(set(done_fullpath_list))
fullpath_list = list(fullpath_list)
print('Resuming. number of videos to be done: ', len(fullpath_list))
if args.level == 2:
vid_list = list(
map(
lambda p: osp.join(
osp.basename(osp.dirname(p)), osp.basename(p)),
fullpath_list))
elif args.level == 1:
vid_list, fullpath_list = list(map(lambda p: osp.basename(p), fullpath_list))
if args.ref_listfile_path != '':
vid_list, fullpath_list = filter_vid_list(vid_list, args.src_dir, args.ref_listfile_path)
pool = Pool(args.num_worker)
pool.map(
extract_frame,
zip(fullpath_list, vid_list, range(len(vid_list)),
len(vid_list) * [args.flow_type],
len(vid_list) * [args.task]))
| [
[
[
7,
15
],
[
4457,
4465
]
],
[
[
23,
27
],
[
8336,
8340
],
[
8409,
8413
],
[
8675,
8679
],
[
8799,
8803
]
],
[
[
35,
37
],
[
7784,
7786
],
[
7977,
7979
],
[
8200,
8202
],
[
2497,
2499
],
[
3497,
3499
],
[
4282,
4284
],
[
4309,
4311
],
[
7553,
7555
]
],
[
[
45,
59
],
[
7701,
7704
],
[
8058,
8061
],
[
8111,
8114
],
[
575,
578
],
[
588,
591
],
[
635,
638
],
[
906,
909
],
[
919,
922
],
[
974,
977
],
[
2147,
2150
],
[
2347,
2350
],
[
2625,
2628
],
[
2857,
2860
],
[
3098,
3101
],
[
3325,
3328
],
[
3576,
3579
],
[
3751,
3754
],
[
3965,
3968
],
[
4118,
4121
],
[
9225,
9228
],
[
9255,
9258
],
[
9268,
9271
],
[
9285,
9288
],
[
9414,
9417
]
],
[
[
67,
70
],
[
4389,
4392
]
],
[
[
78,
86
],
[
1910,
1918
]
],
[
[
115,
119
],
[
9599,
9603
]
],
[
[
128,
132
],
[
1028,
1032
],
[
1255,
1259
],
[
1716,
1720
],
[
1773,
1777
]
],
[
[
140,
151
],
[
1161,
1163
]
],
[
[
177,
189
],
[
6752,
6754
]
],
[
[
197,
200
]
],
[
[
206,
219
],
[
9643,
9656
]
],
[
[
4430,
4440
],
[
7676,
7686
]
],
[
[
6704,
6722
],
[
7898,
7916
],
[
7246,
7264
]
],
[
[
7170,
7185
],
[
9519,
9534
]
],
[
[
7669,
7673
],
[
7711,
7715
],
[
7760,
7764
],
[
7796,
7800
],
[
7818,
7822
],
[
7846,
7850
],
[
7917,
7921
],
[
7988,
7992
],
[
8067,
8071
],
[
8229,
8233
],
[
8298,
8302
],
[
8346,
8350
],
[
8368,
8372
],
[
8419,
8423
],
[
8441,
8445
],
[
8588,
8592
],
[
8641,
8645
],
[
8685,
8689
],
[
8707,
8711
],
[
8760,
8764
],
[
8809,
8813
],
[
8831,
8835
],
[
8919,
8923
],
[
9140,
9144
],
[
9344,
9348
],
[
9455,
9459
],
[
9545,
9549
],
[
9559,
9563
],
[
9604,
9608
],
[
9746,
9750
],
[
9792,
9796
],
[
644,
648
],
[
702,
706
],
[
749,
753
],
[
1200,
1204
],
[
1277,
1281
],
[
1349,
1353
],
[
1472,
1476
],
[
1610,
1614
],
[
2104,
2108
],
[
2264,
2268
],
[
2285,
2289
],
[
2464,
2468
],
[
2548,
2552
],
[
2582,
2586
],
[
2768,
2772
],
[
2790,
2794
],
[
3000,
3004
],
[
3055,
3059
],
[
3241,
3245
],
[
3263,
3267
],
[
3468,
3472
],
[
3533,
3537
],
[
3685,
3689
],
[
3706,
3710
],
[
3886,
3890
],
[
3907,
3911
],
[
4074,
4078
],
[
4253,
4257
]
],
[
[
7888,
7895
],
[
8027,
8034
]
],
[
[
7967,
7974
],
[
8027,
8034
]
],
[
[
8014,
8023
],
[
8081,
8090
]
],
[
[
8048,
8055
],
[
8121,
8128
],
[
8173,
8180
],
[
8212,
8219
]
],
[
[
8320,
8333
],
[
8516,
8529
],
[
8960,
8973
],
[
9319,
9332
],
[
9431,
9444
],
[
9670,
9683
],
[
7378,
7391
]
],
[
[
8388,
8406
],
[
8990,
9008
]
],
[
[
8659,
8672
],
[
8895,
8908
],
[
8960,
8973
],
[
9319,
9332
],
[
9431,
9444
],
[
9670,
9683
],
[
7378,
7391
]
],
[
[
8778,
8796
],
[
8990,
9008
]
],
[
[
8940,
8953
],
[
9040,
9053
]
],
[
[
9019,
9032
],
[
9116,
9129
],
[
9319,
9332
],
[
9431,
9444
],
[
9670,
9683
],
[
7378,
7391
]
],
[
[
9165,
9173
],
[
9535,
9543
],
[
9685,
9693
],
[
9705,
9713
],
[
9733,
9741
],
[
9779,
9787
]
],
[
[
9369,
9377
],
[
9535,
9543
],
[
9685,
9693
],
[
9705,
9713
],
[
9733,
9741
],
[
9779,
9787
]
],
[
[
9379,
9392
],
[
9670,
9683
],
[
7378,
7391
]
],
[
[
9493,
9501
],
[
9685,
9693
],
[
9705,
9713
],
[
9733,
9741
],
[
9779,
9787
]
],
[
[
9503,
9516
],
[
9670,
9683
],
[
7378,
7391
]
],
[
[
9592,
9596
],
[
9625,
9629
]
]
] |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cliff.formatters import value
from cliff.tests import test_columns
def test_value_formatter():
sf = value.ValueFormatter()
c = ('a', 'b', 'c', 'd')
d = ('A', 'B', 'C', '"no escape me"')
expected = 'A\nB\nC\n"no escape me"\n'
output = six.StringIO()
sf.emit_one(c, d, output, None)
actual = output.getvalue()
assert expected == actual
def test_value_formatter_formattable_column():
sf = value.ValueFormatter()
c = ('a', 'b', 'c', 'd')
d = ('A', 'B', 'C', test_columns.FauxColumn(['the', 'value']))
expected = "A\nB\nC\n['the', 'value']\n"
output = six.StringIO()
sf.emit_one(c, d, output, None)
actual = output.getvalue()
assert expected == actual
def test_value_list_formatter():
sf = value.ValueFormatter()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
d2 = ('D', 'E', 'F')
data = [d1, d2]
expected = 'A B C\nD E F\n'
output = six.StringIO()
sf.emit_list(c, data, output, None)
actual = output.getvalue()
assert expected == actual
def test_value_list_formatter_formattable_column():
sf = value.ValueFormatter()
c = ('a', 'b', 'c')
d1 = ('A', 'B', test_columns.FauxColumn(['the', 'value']))
data = [d1]
expected = "A B ['the', 'value']\n"
output = six.StringIO()
sf.emit_list(c, data, output, None)
actual = output.getvalue()
assert expected == actual
| [
[
[
586,
589
],
[
852,
855
],
[
1199,
1202
],
[
1517,
1520
],
[
1875,
1878
]
],
[
[
620,
625
],
[
702,
707
],
[
1022,
1027
],
[
1355,
1360
],
[
1696,
1701
]
],
[
[
650,
662
],
[
1098,
1110
],
[
1763,
1775
]
],
[
[
669,
689
]
],
[
[
970,
1009
]
],
[
[
1317,
1342
]
],
[
[
1639,
1683
]
]
] |
"""MeiliSearchHelper
Wrapper on top of the MeiliSearch API client"""
import meilisearch
from builtins import range
def remove_bad_encoding(value):
return value.replace(''', "'")
def clean_one_field(value):
if isinstance(value, bool):
return str(value)
elif isinstance(value, str):
return remove_bad_encoding(value)
return value
def clean_dict(record):
for key, value in record.items():
if isinstance(value, dict):
record[key] = clean_dict(value)
else:
record[key] = clean_one_field(value)
return record
def parse_record(record):
new_weight = {}
for k, v in record['weight'].items():
new_weight[k] = v
new_hierarchy = {}
for k, v in record['hierarchy'].items():
new_hierarchy['hierarchy_' + k] = v
new_hierarchy_radio = {}
for k, v in record['hierarchy_radio'].items():
key = 'hierarchy_radio_' + k
new_hierarchy_radio = {**{key: v}, **new_hierarchy_radio}
del record['weight']
del record['hierarchy']
del record['hierarchy_radio']
del record['hierarchy_camel']
del record['hierarchy_radio_camel']
del record['content_camel']
return {**record, **new_weight, **new_hierarchy, **new_hierarchy_radio}
class MeiliSearchHelper:
"""MeiliSearchHelper"""
# Cf the end of this file to understand these settings
SETTINGS = {
'rankingRules': [
'words',
'typo',
'attribute',
'proximity',
'exactness',
'desc(page_rank)',
'desc(level)',
'asc(position)'
],
'distinctAttribute': 'url',
'searchableAttributes': [
'hierarchy_radio_lvl0',
'hierarchy_radio_lvl1',
'hierarchy_radio_lvl2',
'hierarchy_radio_lvl3',
'hierarchy_radio_lvl4',
'hierarchy_radio_lvl5',
'hierarchy_lvl0',
'hierarchy_lvl1',
'hierarchy_lvl2',
'hierarchy_lvl3',
'hierarchy_lvl4',
'hierarchy_lvl5',
'hierarchy_lvl6',
'content',
'objectID',
'page_rank',
'level',
'position'
],
'displayedAttributes': [
'hierarchy_radio_lvl0',
'hierarchy_radio_lvl1',
'hierarchy_radio_lvl2',
'hierarchy_radio_lvl3',
'hierarchy_radio_lvl4',
'hierarchy_radio_lvl5',
'hierarchy_lvl0',
'hierarchy_lvl1',
'hierarchy_lvl2',
'hierarchy_lvl3',
'hierarchy_lvl4',
'hierarchy_lvl5',
'hierarchy_lvl6',
'anchor',
'url',
'content',
'objectID'
]
}
def __init__(self, host_url, api_key, index_uid, custom_settings):
self.meilisearch_client = meilisearch.Client(host_url, api_key)
self.meilisearch_index = self.__delete_and_create_index(index_uid)
self.add_settings(MeiliSearchHelper.SETTINGS, custom_settings)
def add_settings(self, default_settings, custom_settings):
settings = {**default_settings, **custom_settings}
self.meilisearch_index.update_settings(settings)
def add_records(self, records, url, from_sitemap):
"""Add new records to the index"""
record_count = len(records)
for i in range(0, record_count, 50):
parsed_records = list(map(parse_record, records[i:i + 50]))
cleaned_records = list(map(clean_dict, parsed_records))
self.meilisearch_index.add_documents(cleaned_records)
color = "96" if from_sitemap else "94"
print(
'\033[{}m> Docs-Scraper: \033[0m{}\033[93m {} records\033[0m)'.format(
color, url, record_count))
def __delete_and_create_index(self, index_uid):
try:
self.meilisearch_client.get_index(index_uid).delete()
except Exception:
print("The index " + index_uid + " does not exist. Creating...")
return self.meilisearch_client.create_index(index_uid, {'primaryKey': 'objectID'})
# Algolia's settings:
# {"minWordSizefor1Typo"=>3,
# "minWordSizefor2Typos"=>7,
# "hitsPerPage"=>20,
# "maxValuesPerFacet"=>100,
# "minProximity"=>1,
# "version"=>2,
# "attributesToIndex"=>
# ["unordered(hierarchy_radio_camel.lvl0)",
# "unordered(hierarchy_radio.lvl0)",
# "unordered(hierarchy_radio_camel.lvl1)",
# "unordered(hierarchy_radio.lvl1)",
# "unordered(hierarchy_radio_camel.lvl2)",
# "unordered(hierarchy_radio.lvl2)",
# "unordered(hierarchy_radio_camel.lvl3)",
# "unordered(hierarchy_radio.lvl3)",
# "unordered(hierarchy_radio_camel.lvl4)",
# "unordered(hierarchy_radio.lvl4)",
# "unordered(hierarchy_radio_camel.lvl5)",
# "unordered(hierarchy_radio.lvl5)",
# "unordered(hierarchy_camel.lvl0)",
# "unordered(hierarchy.lvl0)",
# "unordered(hierarchy_camel.lvl1)",
# "unordered(hierarchy.lvl1)",
# "unordered(hierarchy_camel.lvl2)",
# "unordered(hierarchy.lvl2)",
# "unordered(hierarchy_camel.lvl3)",
# "unordered(hierarchy.lvl3)",
# "unordered(hierarchy_camel.lvl4)",
# "unordered(hierarchy.lvl4)",
# "unordered(hierarchy_camel.lvl5)",
# "unordered(hierarchy.lvl5)",
# "content"],
# "numericAttributesToIndex"=>nil,
# "attributesToRetrieve"=>["hierarchy", "content", "anchor", "url"],
# "allowTyposOnNumericTokens"=>false,
# "ignorePlurals"=>true,
# "camelCaseAttributes"=>["hierarchy", "hierarchy_radio", "content"],
# "advancedSyntax"=>true,
# "attributeCriteriaComputedByMinProximity"=>true,
# "distinct"=>true,
# "unretrievableAttributes"=>nil,
# "optionalWords"=>nil,
# "userData"=>{"crawling_issue"=>false},
# "attributesForFaceting"=>["lang"],
# "attributesToSnippet"=>["content:10"],
# "attributesToHighlight"=>["hierarchy", "hierarchy_camel", "content"],
# "paginationLimitedTo"=>1000,
# "attributeForDistinct"=>"url",
# "exactOnSingleWordQuery"=>"attribute",
# "ranking"=>
# ["words", "filters", "typo", "attribute", "proximity", "exact", "custom"],
# "customRanking"=>
# ["desc(weight.page_rank)", "desc(weight.level)", "asc(weight.position)"],
# "separatorsToIndex"=>"",
# "removeWordsIfNoResults"=>"allOptional",
# "queryType"=>"prefixLast",
# "highlightPreTag"=>"<span class=\"algolia-docsearch-suggestion--highlight\">",
# "highlightPostTag"=>"</span>",
# "snippetEllipsisText"=>"",
# "alternativesAsExact"=>["ignorePlurals", "singleWordSynonym"]}
| [
[
[
77,
88
],
[
2932,
2943
]
],
[
[
110,
115
],
[
3449,
3454
]
],
[
[
121,
140
],
[
324,
343
]
],
[
[
194,
209
],
[
551,
566
]
],
[
[
373,
383
],
[
493,
503
],
[
3588,
3598
]
],
[
[
597,
609
],
[
3515,
3527
]
],
[
[
1278,
1295
],
[
3071,
3088
]
]
] |
"""
chromeをアプリモードで起動するためのコマンドを生成する
"""
import sys, os
from moray.exception import SupportError
name = 'chrome'
def create_command(path, url, cmdline_args):
"""
起動コマンド生成
Attributes:
path (str): chromeコマンドのパス
url (str): 接続先のURL
cmdline_args (list<str>): コマンドライン引数
Returns:
list<str>: 生成された起動コマンド
"""
return [path, '--app=' + url] + cmdline_args
def find_path():
"""
chromeの実行ファイルパスを取得
Returns:
str: chromeの実行ファイルパス
Raises:
SupportError: 対象外OSの場合
"""
if sys.platform in ('win32', 'win64'):
# Windowsの場合
return _find_chrome_windows()
else:
# 対象外OSの場合
# このOSはサポート対象外のOSです。
error_msg = 'This OS is not a supported OS.'
raise SupportError(error_msg)
def _find_chrome_windows():
"""
Windowsのchromeの実行ファイルパスを取得
Returns:
str: Windowsのchromeの実行ファイルパス
Raises:
FileNotFoundError: ブラウザ実行ファイル不明
"""
import winreg
reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
# HKEY_CURRENT_USER: 現在のユーザーのレジストリ設定
# HKEY_LOCAL_MACHINE: すべてのユーザーのレジストリ設定
for reg_entry in winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE:
try:
# レジストリからchromeの実行ファイルパスを取得
with winreg.OpenKey(reg_entry, reg_path, 0, winreg.KEY_READ) as reg_key:
chrome_path = winreg.QueryValueEx(reg_key, None)[0]
if os.path.isfile(chrome_path):
return chrome_path
except Exception as e:
pass
# chrome.exe が見つかりませんでした
error_msg = '"chrome.exe" is not found.'
raise FileNotFoundError(error_msg)
| [
[
[
51,
54
],
[
618,
621
]
],
[
[
56,
58
],
[
1606,
1608
]
],
[
[
90,
102
],
[
865,
877
]
],
[
[
106,
110
]
],
[
[
129,
143
]
],
[
[
448,
457
]
],
[
[
896,
916
],
[
702,
722
]
]
] |
from typing import List, Optional
import aiohttp
import json
from aiohttp.client import ClientSession
from itspylearning.consts import ITSLEARNING_URL
from itspylearning.organisation import Organisation
_clientSession: Optional[ClientSession] = None
def _getClient() -> aiohttp.ClientSession:
global _clientSession
if(_clientSession is None):
_clientSession = aiohttp.ClientSession()
return _clientSession
async def search_organisations(query) -> List[dict]:
response = await _getClient().get(f"{ITSLEARNING_URL}/restapi/sites/all/organisations/search/v1/?searchText={query}")
rawData = await response.text()
data = json.loads(rawData)
matches = []
for match in data["EntityArray"]:
matches.append({"id": match["CustomerId"], "name": match["SiteName"],})
await close_session()
return matches
async def fetch_organisation( id) -> Organisation:
response = await _getClient().get(f"{ITSLEARNING_URL}/restapi/sites/{id}/v1")
if response.status != 200:
raise Exception('Request failure.')
rawData = await response.text()
data = json.loads(rawData)
if data == None:
raise Exception("Organisation did not exist.")
organisation = Organisation(data)
await close_session()
return organisation
async def close_session():
global _clientSession
await _clientSession.close()
_clientSession = None | [
[
[
19,
23
],
[
473,
477
]
],
[
[
25,
33
],
[
222,
230
]
],
[
[
41,
48
],
[
274,
281
],
[
380,
387
]
],
[
[
56,
60
],
[
654,
658
],
[
1110,
1114
]
],
[
[
89,
102
],
[
231,
244
]
],
[
[
137,
152
],
[
526,
541
],
[
947,
962
]
],
[
[
192,
204
],
[
892,
904
],
[
1225,
1237
]
],
[
[
206,
220
],
[
222,
245
],
[
330,
344
],
[
1359,
1373
]
],
[
[
258,
268
],
[
506,
516
],
[
927,
937
]
],
[
[
432,
853
]
],
[
[
855,
1293
]
],
[
[
1296,
1407
],
[
819,
832
],
[
1254,
1267
]
],
[
[
363,
377
],
[
416,
430
]
],
[
[
1386,
1400
]
]
] |
import pytest
import uuid
from fastapi import status
#
# INVALID TESTS
#
@pytest.mark.parametrize(
"key,value",
[
("description", 123),
("description", ""),
("uuid", None),
("uuid", 1),
("uuid", "abc"),
("uuid", ""),
("value", 123),
("value", None),
("value", ""),
],
)
def test_create_invalid_fields(client, key, value):
create_json = {"value": "test"}
create_json[key] = value
create = client.post("/api/node/directive/", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.parametrize(
"key",
[
("uuid"),
("value"),
],
)
def test_create_duplicate_unique_fields(client, key):
# Create an object
create1_json = {"uuid": str(uuid.uuid4()), "value": "test"}
client.post("/api/node/directive/", json=create1_json)
# Ensure you cannot create another object with the same unique field value
create2_json = {"value": "test2"}
create2_json[key] = create1_json[key]
create2 = client.post("/api/node/directive/", json=create2_json)
assert create2.status_code == status.HTTP_409_CONFLICT
@pytest.mark.parametrize(
"key",
[
("value"),
],
)
def test_create_missing_required_fields(client, key):
create_json = {"value": "test"}
del create_json[key]
create = client.post("/api/node/directive/", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
#
# VALID TESTS
#
@pytest.mark.parametrize(
"key,value",
[
("description", None),
("description", "test"),
("uuid", str(uuid.uuid4()))
],
)
def test_create_valid_optional_fields(client, key, value):
# Create the object
create = client.post("/api/node/directive/", json={key: value, "value": "test"})
assert create.status_code == status.HTTP_201_CREATED
# Read it back
get = client.get(create.headers["Content-Location"])
assert get.json()[key] == value
def test_create_valid_required_fields(client):
# Create the object
create = client.post("/api/node/directive/", json={"value": "test"})
assert create.status_code == status.HTTP_201_CREATED
# Read it back
get = client.get(create.headers["Content-Location"])
assert get.json()["value"] == "test"
| [
[
[
7,
13
],
[
79,
85
],
[
616,
622
],
[
1195,
1201
],
[
1540,
1546
]
],
[
[
21,
25
],
[
1673,
1677
],
[
813,
817
]
],
[
[
47,
53
],
[
576,
582
],
[
1167,
1173
],
[
1480,
1486
],
[
1898,
1904
],
[
2214,
2220
]
],
[
[
363,
389
]
],
[
[
708,
743
]
],
[
[
1269,
1304
]
],
[
[
1701,
1734
]
],
[
[
2041,
2074
]
]
] |
from Comparison import Comparison
from Action import Action
from TransitionCodegen import TransitionCodegen
from TransitionGraphic import TransitionGraphic
import xml.etree.ElementTree as ET
class Transition:
def __init__(self, id):
self.id = id
self.fromStateID = None
self.toStateID = None
self.condition = None
self.priority = 0
self.cg = TransitionCodegen(self)
self.graphic = TransitionGraphic(self)
self.actions = []
def parseCfg(self, etreeNode):
for child in etreeNode:
if(child.tag == "from"):
self.fromStateID = int(child.text)
elif(child.tag == "to"):
self.toStateID = int(child.text)
elif(child.tag == "action"):
newAction = Action()
newAction.parseCfg(child)
self.actions.append(newAction)
elif(child.tag == "condition"):
self.condition = Comparison()
self.condition.parseCfg(child)
elif(child.tag == "priority"):
self.priority = int(child.text)
elif(child.tag == "graphic"):
self.graphic.parseCfg(child)
def dumpCfg(self):
return ET.Element() #TODO: generate XML representation of current object
| [
[
[
27,
37
],
[
1030,
1040
]
],
[
[
58,
64
],
[
851,
857
]
],
[
[
96,
113
],
[
418,
435
]
],
[
[
145,
162
],
[
466,
483
]
],
[
[
173,
200
],
[
1331,
1333
]
],
[
[
212,
222
]
]
] |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kinova_bringup"
PROJECT_SPACE_DIR = "/workspace/install"
PROJECT_VERSION = "0.0.0"
| [
[
[
57,
78
]
],
[
[
84,
115
]
],
[
[
152,
174
]
],
[
[
198,
230
]
],
[
[
267,
279
]
],
[
[
299,
316
]
],
[
[
340,
355
]
]
] |
try:
from .pfdo_med2image import pfdo_med2image
except:
from pfdo_med2image import pfdo_med2image
| [
[
[
40,
54
]
],
[
[
98,
112
]
]
] |
# CONFIG_MLH = ["//mina/config"]
CONFIG_MLH = select({
"//:profile_debug": ["//src/config/debug"],
"//:profile_dev": ["//src:dev"],
"//:profile_release": ["//src:release"],
}, no_match_error = "Unknown profile")
| [
[
[
34,
44
]
]
] |
import pytest
from PIL import Image
from img2gb.gbtile import GBTile
class Test_GBTile(object):
@pytest.fixture
def image(self):
return Image.open("./test/assets/tileset.png")
@pytest.mark.parametrize("x,result", [
(0, "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"),
(8, "FF 01 81 7F BD 7F A5 7B A5 7B BD 63 81 7F FF FF"),
(16, "7E 00 81 7F 81 7F 81 7F 81 7F 81 7F 81 7F 7E 7E"),
(24, "3C 00 54 2A A3 5F C1 3F 83 7F C5 3F 2A 7E 3C 3C"),
(32, "04 04 04 04 0A 0A 12 12 66 00 99 77 99 77 66 66"),
])
def test_from_image(self, image, x, result):
tile = GBTile.from_image(image, x)
assert tile.to_hex_string() == result
def test_put_pixel(self):
tile = GBTile()
for b in tile.data:
assert b == 0
tile.put_pixel(0, 0, 3)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x80
tile.put_pixel(4, 0, 2)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x88
def test_get_pixel(self, image):
tile = GBTile.from_image(image, 32)
assert tile.get_pixel(0, 0) == 0b00
assert tile.get_pixel(0, 6) == 0b01
assert tile.get_pixel(2, 6) == 0b10
assert tile.get_pixel(5, 0) == 0b11
def test_to_hex_string(self):
tile = GBTile()
assert tile.to_hex_string() == "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" # noqa
tile.put_pixel(0, 0, 3)
tile.put_pixel(1, 0, 3)
assert tile.to_hex_string() == "C0 C0 00 00 00 00 00 00 00 00 00 00 00 00 00 00" # noqa
def test_to_image(self, image):
tile = GBTile.from_image(image, 32)
tile_image = tile.to_image()
assert tile_image.getpixel((0, 0)) == 0b00
assert tile_image.getpixel((0, 6)) == 0b01
assert tile_image.getpixel((2, 6)) == 0b10
assert tile_image.getpixel((5, 0)) == 0b11
def test_gbtile_equality(self):
tile1 = GBTile()
tile2 = GBTile()
assert tile1 == tile2
tile1.put_pixel(0, 0, 3)
assert tile1 != tile2
tile2.put_pixel(0, 0, 3)
assert tile1 == tile2
def test_data(self):
tile = GBTile()
assert len(tile.data) == 16
assert tile.data[0] == 0x00
assert tile.data[1] == 0x00
tile.put_pixel(0, 0, 3)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x80
| [
[
[
7,
13
],
[
104,
110
],
[
201,
207
]
],
[
[
30,
35
],
[
155,
160
]
],
[
[
62,
68
],
[
639,
645
],
[
759,
765
],
[
1088,
1094
],
[
1343,
1349
],
[
1665,
1671
],
[
1988,
1994
],
[
2013,
2019
],
[
2224,
2230
]
],
[
[
77,
88
]
]
] |
# -*- coding: utf8 -*-
u"""
Mathics: a general-purpose computer algebra system
Copyright (C) 2011-2013 The Mathics Team
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import cProfile
import pstats
from mathics.core.definitions import Definitions
from mathics.core.evaluation import Evaluation
definitions = Definitions(add_builtin=True)
def prepare():
pass
result = None
def run():
global result
# prompt = '(1+a)(1+b)(1+c)(1+d)(1+e)//Expand'
# prompt = 'f/@Range[20000];'
# prompt = 'Plus @@ Range[50000]'
# prompt = 'Range[100000];'
try:
# prompt = 'SetAttributes[v, Flat]; v[x_]:={x}; v[a,b]'
# prompt = """(Plus@@Symbol/@CharacterRange["a","z"])^2//Expand;"""
# prompt = (
# 'Plus@@f/@Symbol/@StringJoin/@Tuples[CharacterRange["a","z"],2]')
# prompt = 'FullForm[Nest[1+Sqrt[1+#]&, x, 20]]'
# prompt = '1+2'
prompt = 'DensityPlot[x*y,{x,-1,1},{y,-1,1}]'
evaluation = Evaluation(prompt, definitions, format='xml')
if evaluation.results:
result = evaluation.results[0].result
except KeyboardInterrupt:
result = 'INTERRUPTED'
def _profile():
global result
prepare()
cProfile.run('run()', 'profile')
# print 'Result: %s\n' % result
p = pstats.Stats('profile')
p.sort_stats('cumulative').print_stats(50)
p.print_callees(20)
if __name__ == '__main__':
_profile()
| [
[
[
794,
802
],
[
1835,
1843
]
],
[
[
810,
816
],
[
1912,
1918
]
],
[
[
855,
866
],
[
929,
940
]
],
[
[
903,
913
],
[
1593,
1603
]
],
[
[
915,
926
],
[
1612,
1623
]
],
[
[
965,
972
],
[
1821,
1828
]
],
[
[
986,
992
]
],
[
[
1006,
1009
]
],
[
[
1787,
1795
],
[
2039,
2047
]
],
[
[
1682,
1688
]
],
[
[
1758,
1764
]
]
] |
from __future__ import absolute_import
from .base_transformer import Transformer # noqa
from .fireeye_hx_transformer import FireEyeHXTransformer # noqa
from .generic_transformer import GenericTransformer # noqa
from .sysmon_transformer import SysmonTransformer # noqa
from .evtx_transformer import WinEVTXTransformer # noqa
from .procmon_transformer import ProcmonTransformer # noqa
| [
[
[
23,
38
]
],
[
[
70,
81
]
],
[
[
126,
146
]
],
[
[
188,
206
]
],
[
[
247,
264
]
],
[
[
303,
321
]
],
[
[
363,
381
]
]
] |
import setuptools
from version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="python2latex",
version=__version__,
author="Jean-Samuel Leboeuf",
author_email="jean-samuel.leboeuf.1@ulaval.ca",
description="A Python to LaTeX converter",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jsleb333/python2latex",
packages=setuptools.find_packages(),
install_requires=['numpy', 'colorspacious', 'matplotlib'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
data_files=[('', ['version.py'])]
)
| [
[
[
7,
17
],
[
120,
130
],
[
476,
486
]
],
[
[
38,
49
],
[
175,
186
]
],
[
[
82,
84
],
[
109,
111
]
],
[
[
90,
106
],
[
342,
358
]
]
] |
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from decimal import Decimal, ROUND_HALF_UP
from math import radians, tan, cos, sin
from os import path
_round = lambda f, r=ROUND_HALF_UP: int(Decimal(str(f)).quantize(Decimal("0"), rounding=r))
rgb = lambda r, g, b: (r, g, b)
upper_font_path = path.join(path.dirname(__file__), 'NotoSansCJKSC-Black.ttf')
downer_font_path = path.join(path.dirname(__file__), 'NotoSerifCJKSC-Black.ttf')
def get_gradient_2d(start, stop, width, height, is_horizontal=False):
if is_horizontal:
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
def getTextWidth(text, font, width=100, height=500, recursive=False):
step = 100
img = Image.new("L", (width, height))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, font=font, fill=255)
box = img.getbbox()
if box[2] < width - step or (recursive and box[2] == width - step):
return box[2]
else:
return getTextWidth(text=text, font=font, width=width + step, height=height, recursive=True)
def get_gradient_3d(width, height, start_list, stop_list, is_horizontal_list=(False, False, False)):
result = np.zeros((height, width, len(start_list)), dtype=float)
for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):
result[:, :, i] = get_gradient_2d(start, stop, width, height, is_horizontal)
return result
def createLinearGradient(steps, width, height, size=1, center=0.5):
margin_up = _round(height * (center - size / 2))
margin_down = _round(height * (1 - center - size / 2))
result = np.zeros((0, width, len(steps[0])), dtype=float)
for i, k in enumerate(steps.keys()):
if k == 0:
array = get_gradient_3d(width, _round(margin_up), steps[k], steps[k])
result = np.vstack([result, array])
continue
pk = list(steps.keys())[i - 1]
h = _round(height * size * (k - pk))
array = get_gradient_3d(width, h, steps[pk], steps[k])
result = np.vstack([result, array])
if k == 1:
array = get_gradient_3d(width, _round(margin_down), steps[k], steps[k])
result = np.vstack([result, array])
continue
return result
def genBaseImage(width=1500, height=500):
k = 0.63 # 渐变色缩放系数,不应大于1
c = 0.53 # 渐变色中心位置
downerSilverArray = createLinearGradient({
0: rgb(0, 15, 36),
0.10: rgb(255, 255, 255),
0.18: rgb(55, 58, 59),
0.25: rgb(55, 58, 59),
0.5: rgb(200, 200, 200),
0.75: rgb(55, 58, 59),
0.85: rgb(25, 20, 31),
0.91: rgb(240, 240, 240),
0.95: rgb(166, 175, 194),
1: rgb(50, 50, 50)
}, width=width, height=height, size=k, center=c)
goldArray = createLinearGradient({
0: rgb(253, 241, 0),
0.25: rgb(245, 253, 187),
0.4: rgb(255, 255, 255),
0.75: rgb(253, 219, 9),
0.9: rgb(127, 53, 0),
1: rgb(243, 196, 11)
}, width=width, height=height, size=k, center=c)
strokeRedArray = createLinearGradient({
0: rgb(255, 100, 0),
0.5: rgb(123, 0, 0),
0.51: rgb(240, 0, 0),
1: rgb(5, 0, 0)
}, width=width, height=height, size=k, center=c)
redArray = createLinearGradient({
0: rgb(230, 0, 0),
0.5: rgb(123, 0, 0),
0.51: rgb(240, 0, 0),
1: rgb(5, 0, 0)
}, width=width, height=height, size=k, center=c)
silver2Array = createLinearGradient({
0: rgb(245, 246, 248),
0.15: rgb(255, 255, 255),
0.35: rgb(195, 213, 220),
0.5: rgb(160, 190, 201),
0.51: rgb(160, 190, 201),
0.52: rgb(196, 215, 222),
1.0: rgb(255, 255, 255)
}, width=width, height=height, size=k, center=c)
navyArray = createLinearGradient({
0: rgb(16, 25, 58),
0.03: rgb(255, 255, 255),
0.08: rgb(16, 25, 58),
0.2: rgb(16, 25, 58),
1: rgb(16, 25, 58)
}, width=width, height=height, size=k, center=c)
result = {
"downerSilver": Image.fromarray(np.uint8(downerSilverArray)).crop((0, 0, width, height)),
"gold": Image.fromarray(np.uint8(goldArray)).crop((0, 0, width, height)),
"red": Image.fromarray(np.uint8(redArray)).crop((0, 0, width, height)),
"strokeRed": Image.fromarray(np.uint8(strokeRedArray)).crop((0, 0, width, height)),
"silver2": Image.fromarray(np.uint8(silver2Array)).crop((0, 0, width, height)),
"strokeNavy": Image.fromarray(np.uint8(navyArray)).crop((0, 0, width, height)), # Width: 7
"baseStrokeBlack": Image.new("RGBA", (width, height), rgb(0, 0, 0)).crop((0, 0, width, height)), # Width: 17
"strokeBlack": Image.new("RGBA", (width, height), rgb(16, 25, 58)).crop((0, 0, width, height)), # Width: 17
"strokeWhite": Image.new("RGBA", (width, height), rgb(221, 221, 221)).crop((0, 0, width, height)), # Width: 8
"baseStrokeWhite": Image.new("RGBA", (width, height), rgb(255, 255, 255)).crop((0, 0, width, height))
# Width: 8
}
for k in result.keys():
result[k].putalpha(255)
return result
def genImage(word_a="5000兆円", word_b="欲しい!", default_width=1500, height=500,
bg="white", subset=250, default_base=None):
# width = max_width
k = 0.8 # 字体缩放系数
alpha = (0, 0, 0, 0)
leftmargin = 50
upmargin = 20
font_upper = ImageFont.truetype(upper_font_path, _round(height * 0.35 * k) + upmargin)
font_downer = ImageFont.truetype(downer_font_path, _round(height * 0.35 * k) + upmargin)
# Prepare Width
upper_width = max([default_width,
getTextWidth(word_a, font_upper, width=default_width,
height=_round(height / 2))]) + 300
downer_width = max([default_width,
getTextWidth(word_b, font_upper, width=default_width,
height=_round(height / 2))]) + 300
# Prepare base - Upper (if required)
if default_width == upper_width:
upper_base = default_base
else:
upper_base = genBaseImage(width=upper_width + leftmargin, height=_round(height / 2) + upmargin)
# Prepare base - Downer (if required)
downer_base = genBaseImage(width=downer_width + leftmargin, height=_round(height / 2) + upmargin)
# if default_width == downer_width:
# downer_base = default_base
# else:
# Prepare mask - Upper
upper_mask_base = Image.new("L", (upper_width + leftmargin, _round(height / 2) + upmargin), 0)
mask_img_upper = list()
upper_data = [
[
(4, 4), (4, 4), (0, 0), (0, 0), (2, -3), (0, -3), (0, -3), (0, -3)
],
[
22, 20, 16, 10, 6, 6, 3, 0
],
[
"baseStrokeBlack",
"downerSilver",
"baseStrokeBlack",
"gold",
"baseStrokeBlack",
"baseStrokeWhite",
"strokeRed",
"red",
]
]
for pos, stroke, color in zip(upper_data[0], upper_data[1], upper_data[2]):
mask_img_upper.append(upper_mask_base.copy())
mask_draw_upper = ImageDraw.Draw(mask_img_upper[-1])
mask_draw_upper.text((pos[0] + leftmargin, pos[1] + upmargin), word_a,
font=font_upper, fill=255,
stroke_width=_round(stroke * height / 500))
# Prepare mask - Downer
downer_mask_base = Image.new("L", (downer_width + leftmargin, _round(height / 2) + upmargin), 0)
mask_img_downer = list()
downer_data = [
[
(5, 2), (5, 2), (0, 0), (0, 0), (0, 0), (0, -3)
], [
22, 19, 17, 8, 7, 0
], [
"baseStrokeBlack",
"downerSilver",
"strokeBlack",
"strokeWhite",
"strokeNavy",
"silver2"
]
]
for pos, stroke, color in zip(downer_data[0], downer_data[1], downer_data[2]):
mask_img_downer.append(downer_mask_base.copy())
mask_draw_downer = ImageDraw.Draw(mask_img_downer[-1])
mask_draw_downer.text((pos[0] + leftmargin, pos[1] + upmargin), word_b,
font=font_downer, fill=255,
stroke_width=_round(stroke * height / 500))
# Draw text - Upper
img_upper = Image.new("RGBA", (upper_width, _round(height / 2)), alpha)
for i, (pos, stroke, color) in enumerate(zip(upper_data[0], upper_data[1], upper_data[2])):
img_upper_part = Image.new("RGBA", (upper_width + leftmargin, _round(height / 2) + upmargin), alpha)
img_upper_part.paste(upper_base[color], (0, 0), mask=mask_img_upper[i])
img_upper.alpha_composite(img_upper_part)
# Draw text - Downer
img_downer = Image.new("RGBA", (downer_width + leftmargin, _round(height / 2)), alpha)
for i, (pos, stroke, color) in enumerate(zip(downer_data[0], downer_data[1], downer_data[2])):
img_downer_part = Image.new("RGBA", (downer_width + leftmargin, _round(height / 2) + upmargin), alpha)
img_downer_part.paste(downer_base[color], (0, 0), mask=mask_img_downer[i])
img_downer.alpha_composite(img_downer_part)
# img_upper.save("./uptemp.png")
# img_downer.save("./downtemp.png")
# tilt image
tiltres = list()
angle = 20
for img in [img_upper, img_downer]:
dist = img.height * tan(radians(angle))
data = (1, tan(radians(angle)), -dist, 0, 1, 0)
imgc = img.crop((0, 0, img.width + dist, img.height))
imgt = imgc.transform(imgc.size, Image.AFFINE, data, Image.BILINEAR)
tiltres.append(imgt)
# finish
previmg = Image.new("RGBA", (max([upper_width, downer_width]) + leftmargin + subset + 100, height + upmargin + 100),
(255, 255, 255, 0))
# previmg.paste(tiltres[0], (0, 0))
# previmg.paste(tiltres[1], (subset, _round(height/2)))
previmg.alpha_composite(tiltres[0], (0, 50), (0, 0))
if upper_width > downer_width + subset:
previmg.alpha_composite(tiltres[1], (upper_width + subset - downer_width, _round(height / 2) + 50), (0, 0))
else:
previmg.alpha_composite(tiltres[1], (subset, _round(height / 2) + 50), (0, 0))
# previmg.save("./test1.png")
croprange = previmg.getbbox()
img = previmg.crop(croprange)
final_image = Image.new("RGB", (img.size[0] + 100, img.size[1] + 100), bg)
final_image.paste(img, (50, 50))
return final_image
| [
[
[
16,
21
],
[
815,
820
],
[
4280,
4285
],
[
4371,
4376
],
[
4453,
4458
],
[
4540,
4545
],
[
4631,
4636
],
[
4723,
4728
],
[
4829,
4834
],
[
4944,
4949
],
[
5062,
5067
],
[
5186,
5191
],
[
6751,
6756
],
[
7762,
7767
],
[
8673,
8678
],
[
8858,
8863
],
[
9120,
9125
],
[
9321,
9326
],
[
9932,
9937
],
[
9952,
9957
],
[
10029,
10034
],
[
10726,
10731
]
],
[
[
23,
32
],
[
859,
868
],
[
7461,
7470
],
[
8378,
8387
]
],
[
[
34,
43
],
[
5653,
5662
],
[
5746,
5755
]
],
[
[
52,
63
],
[
576,
578
],
[
584,
586
],
[
657,
659
],
[
665,
667
],
[
1283,
1285
],
[
1748,
1750
],
[
1964,
1966
],
[
2181,
2183
],
[
2335,
2337
],
[
4296,
4298
],
[
4387,
4389
],
[
4469,
4471
],
[
4556,
4558
],
[
4647,
4649
],
[
4739,
4741
]
],
[
[
85,
92
],
[
213,
220
],
[
238,
245
]
],
[
[
94,
107
]
],
[
[
126,
133
],
[
9754,
9761
],
[
9794,
9801
]
],
[
[
135,
138
],
[
9750,
9753
],
[
9790,
9793
]
],
[
[
140,
143
]
],
[
[
145,
148
]
],
[
[
165,
169
],
[
319,
323
],
[
329,
333
],
[
400,
404
],
[
410,
414
]
],
[
[
173,
179
],
[
1637,
1643
],
[
1693,
1699
],
[
1903,
1909
],
[
2066,
2072
],
[
2272,
2278
],
[
5689,
5695
],
[
5783,
5789
],
[
6005,
6011
],
[
6197,
6203
],
[
6427,
6433
],
[
6575,
6581
],
[
6793,
6799
],
[
7676,
7682
],
[
7805,
7811
],
[
8598,
8604
],
[
8705,
8711
],
[
8903,
8909
],
[
9166,
9172
],
[
9367,
9373
],
[
10469,
10475
],
[
10568,
10574
]
],
[
[
266,
269
],
[
2568,
2571
],
[
2599,
2602
],
[
2634,
2637
],
[
2666,
2669
],
[
2697,
2700
],
[
2732,
2735
],
[
2764,
2767
],
[
2796,
2799
],
[
2831,
2834
],
[
2863,
2866
],
[
2987,
2990
],
[
3020,
3023
],
[
3054,
3057
],
[
3089,
3092
],
[
3121,
3124
],
[
3150,
3153
],
[
3281,
3284
],
[
3313,
3316
],
[
3344,
3347
],
[
3372,
3375
],
[
3492,
3495
],
[
3522,
3525
],
[
3553,
3556
],
[
3581,
3584
],
[
3705,
3708
],
[
3740,
3743
],
[
3775,
3778
],
[
3809,
3812
],
[
3844,
3847
],
[
3879,
3882
],
[
3913,
3916
],
[
4040,
4043
],
[
4072,
4075
],
[
4107,
4110
],
[
4138,
4141
],
[
4167,
4170
],
[
4864,
4867
],
[
4979,
4982
],
[
5097,
5100
],
[
5221,
5224
]
],
[
[
301,
316
],
[
5672,
5687
]
],
[
[
381,
397
],
[
5765,
5781
]
],
[
[
471,
486
],
[
1469,
1484
]
],
[
[
722,
734
],
[
1077,
1089
],
[
5907,
5919
],
[
6098,
6110
]
],
[
[
1172,
1187
],
[
1880,
1895
],
[
2116,
2131
],
[
2249,
2264
]
],
[
[
1556,
1576
],
[
2533,
2553
],
[
2952,
2972
],
[
3246,
3266
],
[
3457,
3477
],
[
3670,
3690
],
[
4005,
4025
]
],
[
[
2412,
2424
],
[
6375,
6387
],
[
6522,
6534
]
],
[
[
5386,
5394
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Idiota object types
tree - A tree (directory listing) object that represents the directory structure in a tree object.
commit(ref) - A object that represents the changes in a single commit.
blob - A blob object that represents a file or a piece of data.
parent - A object that represents the ancestor to the commit in the DaG.
tag - A object that represents a meta info.
"""
__author__ = "prakashsellathurai"
__copyright__ = "Copyright 2021"
__version__ = "1.0.1"
__email__ = "prakashsellathurai@gmail.com"
import os
import hashlib
import shutil
import json
from collections import namedtuple
from contextlib import contextmanager
GIT_DIR = None
RefValue = namedtuple('RefValue', ['symbolic', 'value'])
@contextmanager
def change_git_dir(new_dir) -> None:
"""
Change the current git directory
Args:
new_dir (str): new git directory
Yields:
str: old git directory
"""
global GIT_DIR
old_dir = GIT_DIR
GIT_DIR = f'{new_dir}/.idiota'
yield
GIT_DIR = old_dir
def init() -> None:
"""
Create .idiota directory
Returns:
None
"""
os.makedirs(GIT_DIR, exist_ok=True)
os.makedirs(f'{GIT_DIR}/objects')
def update_ref(ref, value, deref: bool=True) -> None:
""" Update a ref
Args:
ref (str): ref name
value (str): ref value
deref (bool): dereference symbolic refs
Returns:
None
"""
# TODO: check if ref exists
# TODO: check if value is valid
# TODO: check if ref is symbolic
ref = _get_ref_internal(ref, deref)[0]
assert value.value
if value.symbolic:
value = f'ref: {value.value}'
else:
value = value.value
ref_path = f'{GIT_DIR}/{ref}'
os.makedirs(os.path.dirname(ref_path), exist_ok=True)
with open(ref_path, 'w') as f:
f.write(value)
def get_ref(ref, deref=True) -> RefValue:
""" Get a ref value
Args:
ref (str): ref name
deref (bool): dereference symbolic refs
Returns:
RefValue(str): ref value
"""
return _get_ref_internal(ref, deref)[1]
def delete_ref(ref, deref=True)->None:
""" Delete a ref"""
ref = _get_ref_internal(ref, deref)[0]
os.remove(f'{GIT_DIR}/{ref}')
def _get_ref_internal(ref, deref) -> RefValue:
""" Get a ref value
Args:
ref (str): ref name
deref (bool): dereference symbolic refs
Returns:
RefValue (str): ref value
"""
ref_path = f'{GIT_DIR}/{ref}'
value = None
if os.path.isfile(ref_path):
with open(ref_path) as f:
value = f.read().strip()
symbolic = bool(value) and value.startswith('ref:')
if symbolic:
value = value.split(':', 1)[1].strip()
if deref:
return _get_ref_internal(value, deref=True)
return ref, RefValue(symbolic=symbolic, value=value)
def iter_refs(prefix='', deref=True):
""" Iterate over refs
Args:
prefix (str): ref prefix
deref (bool): dereference symbolic refs
Returns:
Iterator[Tup(str, RefValue)]: ref name and ref value
"""
refs = ['HEAD', 'MERGE_HEAD']
for root, _, filenames in os.walk(f'{GIT_DIR}/refs/'):
root = os.path.relpath(root, GIT_DIR)
refs.extend(f'{root}/{name}' for name in filenames)
for refname in refs:
if not refname.startswith(prefix):
continue
ref = get_ref(refname, deref=deref)
if ref.value:
yield refname, ref
@contextmanager
def get_index():
""" Get index
Yields:
Index: index
"""
index = {}
if os.path.isfile(f'{GIT_DIR}/index'):
with open(f'{GIT_DIR}/index') as f:
index = json.load(f)
yield index
with open(f'{GIT_DIR}/index', 'w') as f:
json.dump(index, f)
def hash_object(data: object, type_='blob')-> str:
"""
Hash an object
uses: Sha1 algorithm
Args:
data (bytes): object data
Returns:
str: object id
"""
obj = type_.encode() + b'\x00' + data
oid = hashlib.sha1(obj).hexdigest()
with open(f'{GIT_DIR}/objects/{oid}', 'wb') as out:
out.write(obj)
return oid
def get_object(oid: str, expected='blob')-> object:
"""
get an object
Args:
oid (str): object id
Returns:
bytes: object data
"""
with open(f'{GIT_DIR}/objects/{oid}', 'rb') as f:
obj = f.read()
first_null = obj.index(b'\x00')
type_ = obj[:first_null].decode()
content = obj[first_null + 1:]
if expected is not None:
assert type_ == expected, f'Expected {expected}, got {type_}'
return content
def object_exists(oid: bool)-> bool:
"""
checks if object of given id exists in the repository
Args:
oid (str): object id
Returns:
bool: True if object exists
"""
return os.path.isfile(f'{GIT_DIR}/objects/{oid}')
def fetch_object_if_missing(oid, remote_git_dir):
"""
fetch object from remote repository if it is not present in local repository
Args:
oid (str): object id
remote_git_dir (str): remote git directory
Returns:
None
"""
if object_exists(oid):
return
remote_git_dir += '/.ugit'
shutil.copy(f'{remote_git_dir}/objects/{oid}',
f'{GIT_DIR}/objects/{oid}')
def push_object(oid, remote_git_dir):
"""
push object to remote repository
Args:
oid (str): object id
remote_git_dir (str): remote git directory
Returns:
None
"""
remote_git_dir += '/.ugit'
shutil.copy(f'{GIT_DIR}/objects/{oid}',
f'{remote_git_dir}/objects/{oid}')
| [
[
[
462,
472
]
],
[
[
496,
509
]
],
[
[
529,
540
]
],
[
[
551,
560
]
],
[
[
602,
604
],
[
1222,
1224
],
[
1262,
1264
],
[
1864,
1866
],
[
1876,
1878
],
[
2341,
2343
],
[
2653,
2655
],
[
3316,
3318
],
[
3360,
3362
],
[
3755,
3757
],
[
5058,
5060
]
],
[
[
612,
619
],
[
4223,
4230
]
],
[
[
627,
633
],
[
5457,
5463
],
[
5802,
5808
]
],
[
[
641,
645
],
[
3855,
3859
],
[
3939,
3943
]
],
[
[
671,
681
],
[
748,
758
]
],
[
[
705,
719
],
[
796,
810
],
[
3641,
3655
]
],
[
[
722,
729
],
[
1045,
1052
],
[
1234,
1241
],
[
1277,
1284
],
[
1844,
1851
],
[
2354,
2361
],
[
2613,
2620
],
[
3327,
3334
],
[
3382,
3389
],
[
3773,
3780
],
[
3812,
3819
],
[
3903,
3910
],
[
4270,
4277
],
[
4546,
4553
],
[
5076,
5083
],
[
5523,
5530
],
[
5817,
5824
]
],
[
[
737,
745
],
[
2010,
2018
],
[
2410,
2418
],
[
2962,
2970
]
],
[
[
815,
829
]
],
[
[
1126,
1130
]
],
[
[
1305,
1315
]
],
[
[
1982,
1989
],
[
3555,
3562
]
],
[
[
2235,
2245
]
],
[
[
2377,
2394
],
[
1669,
1686
],
[
2196,
2213
],
[
2304,
2321
],
[
2908,
2925
]
],
[
[
3009,
3018
]
],
[
[
3660,
3669
]
],
[
[
3965,
3976
]
],
[
[
4353,
4363
]
],
[
[
4841,
4854
],
[
5387,
5400
]
],
[
[
5107,
5130
]
],
[
[
5554,
5565
]
],
[
[
1057,
1064
]
],
[
[
1102,
1109
]
]
] |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import lit
import os
import pipes
import re
import subprocess
class CxxStandardLibraryTest(lit.formats.TestFormat):
"""
Lit test format for the C++ Standard Library conformance test suite.
This test format is based on top of the ShTest format -- it basically
creates a shell script performing the right operations (compile/link/run)
based on the extension of the test file it encounters. It supports files
with the following extensions:
FOO.pass.cpp - Compiles, links and runs successfully
FOO.pass.mm - Same as .pass.cpp, but for Objective-C++
FOO.run.fail.cpp - Compiles and links successfully, but fails at runtime
FOO.compile.pass.cpp - Compiles successfully, link and run not attempted
FOO.compile.fail.cpp - Does not compile successfully
FOO.link.pass.cpp - Compiles and links successfully, run not attempted
FOO.link.fail.cpp - Compiles successfully, but fails to link
FOO.sh.<anything> - A builtin Lit Shell test
FOO.verify.cpp - Compiles with clang-verify
FOO.fail.cpp - Compiled with clang-verify if clang-verify is
supported, and equivalent to a .compile.fail.cpp
test otherwise. This is supported only for backwards
compatibility with the test suite.
Substitution requirements
===============================
The test format operates by assuming that each test's configuration provides
the following substitutions, which it will reuse in the shell scripts it
constructs:
%{cxx} - A command that can be used to invoke the compiler
%{compile_flags} - Flags to use when compiling a test case
%{link_flags} - Flags to use when linking a test case
%{flags} - Flags to use either when compiling or linking a test case
%{exec} - A command to prefix the execution of executables
Note that when building an executable (as opposed to only compiling a source
file), all three of %{flags}, %{compile_flags} and %{link_flags} will be used
in the same command line. In other words, the test format doesn't perform
separate compilation and linking steps in this case.
Additional supported directives
===============================
In addition to everything that's supported in Lit ShTests, this test format
also understands the following directives inside test files:
// FILE_DEPENDENCIES: file, directory, /path/to/file
This directive expresses that the test requires the provided files
or directories in order to run. An example is a test that requires
some test input stored in a data file. When a test file contains
such a directive, this test format will collect them and make them
available in a special %{file_dependencies} substitution. The intent
is that if one needs to e.g. execute tests on a remote host, the
%{exec} substitution could use %{file_dependencies} to know which
files and directories to copy to the remote host.
// ADDITIONAL_COMPILE_FLAGS: flag1, flag2, flag3
This directive will cause the provided flags to be added to the
%{compile_flags} substitution for the test that contains it. This
allows adding special compilation flags without having to use a
.sh.cpp test, which would be more powerful but perhaps overkill.
Additional provided substitutions and features
==============================================
The test format will define the following substitutions for use inside
tests:
%{verify}
This expands to the set of flags that must be passed to the
compiler in order to use Clang-verify, if that is supported.
verify-support
This Lit feature will be made available when the compiler supports
Clang-verify. This can be used to disable tests that require that
feature, such as `.verify.cpp` tests.
%{file_dependencies}
Expands to the list of files that this test depends on.
See FILE_DEPENDENCIES above.
%{build}
Expands to a command-line that builds the current source
file with the %{flags}, %{compile_flags} and %{link_flags}
substitutions, and that produces an executable named %t.exe.
%{run}
Equivalent to `%{exec} %t.exe`. This is intended to be used
in conjunction with the %{build} substitution.
Design notes
============
This test format never implicitly disables a type of test. For example,
we could be tempted to automatically mark `.verify.cpp` tests as
UNSUPPORTED when clang-verify isn't supported by the compiler. However,
this sort of logic has been known to cause tests to be ignored in the
past, so we favour having tests mark themselves as unsupported explicitly.
This test format still needs work in the following areas:
- It is unknown how well it works on Windows yet.
"""
def getTestsInDirectory(self, testSuite, pathInSuite, litConfig, localConfig):
SUPPORTED_SUFFIXES = ['[.]pass[.]cpp$', '[.]pass[.]mm$', '[.]run[.]fail[.]cpp$',
'[.]compile[.]pass[.]cpp$', '[.]compile[.]fail[.]cpp$',
'[.]link[.]pass[.]cpp$', '[.]link[.]fail[.]cpp$',
'[.]sh[.][^.]+$',
'[.]verify[.]cpp$',
'[.]fail[.]cpp$']
sourcePath = testSuite.getSourcePath(pathInSuite)
for filename in os.listdir(sourcePath):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(sourcePath, filename)
if not os.path.isdir(filepath):
if any([re.search(ext, filename) for ext in SUPPORTED_SUFFIXES]):
yield lit.Test.Test(testSuite, pathInSuite + (filename,), localConfig)
def _checkBaseSubstitutions(self, substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ['%{cxx}', '%{compile_flags}', '%{link_flags}', '%{flags}', '%{exec}']:
assert s in substitutions, "Required substitution {} was not provided".format(s)
# Determine whether clang-verify is supported.
def _supportsVerify(self, test, litConfig):
command = "echo | %{cxx} -xc++ - -Werror -fsyntax-only -Xclang -verify-ignore-unexpected"
command = lit.TestRunner.applySubstitutions([command], test.config.substitutions,
recursion_limit=test.config.recursiveExpansionLimit)[0]
devNull = open(os.devnull, 'w')
result = subprocess.call(command, shell=True, stdout=devNull, stderr=devNull)
return result == 0
def _disableWithModules(self, test, litConfig):
with open(test.getSourcePath(), 'rb') as f:
contents = f.read()
return b'#define _LIBCPP_ASSERT' in contents
def execute(self, test, litConfig):
self._checkBaseSubstitutions(test.config.substitutions)
filename = test.path_in_suite[-1]
# TODO(ldionne): We currently disable tests that re-define _LIBCPP_ASSERT
# when we run with modules enabled. Instead, we should
# split the part that does a death test outside of the
# test, and only disable that part when modules are
# enabled.
if '-fmodules' in test.config.available_features and self._disableWithModules(test, litConfig):
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test {} is unsupported when modules are enabled')
if re.search('[.]sh[.][^.]+$', filename):
steps = [ ] # The steps are already in the script
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -c -o %t.o",
"%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.run.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} ! %t.exe"
]
return self._executeShTest(test, litConfig, steps, fileDependencies=['%t.exe'])
elif filename.endswith('.verify.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only %{verify}"
]
return self._executeShTest(test, litConfig, steps)
# Make sure to check these ones last, since they will match other
# suffixes above too.
elif filename.endswith('.pass.cpp') or filename.endswith('.pass.mm'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} %t.exe"
]
return self._executeShTest(test, litConfig, steps, fileDependencies=['%t.exe'])
# This is like a .verify.cpp test when clang-verify is supported,
# otherwise it's like a .compile.fail.cpp test. This is only provided
# for backwards compatibility with the test suite.
elif filename.endswith('.fail.cpp'):
if self._supportsVerify(test, litConfig):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only %{verify}"
]
else:
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
else:
return lit.Test.Result(lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename))
# Utility function to add compile flags in lit.local.cfg files.
def addCompileFlags(self, config, *flags):
string = ' '.join(flags)
config.substitutions = [(s, x + ' ' + string) if s == '%{compile_flags}' else (s, x) for (s, x) in config.substitutions]
# Modified version of lit.TestRunner.executeShTest to handle custom parsers correctly.
def _executeShTest(self, test, litConfig, steps, fileDependencies=None):
if test.config.unsupported:
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test is unsupported')
# Get the default substitutions
tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
useExternalSh = True
substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir, tmpBase,
normalize_slashes=useExternalSh)
# Add the %{build} and %{run} convenience substitutions
substitutions.append(('%{build}', '%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe'))
substitutions.append(('%{run}', '%{exec} %t.exe'))
# Add the %{verify} substitution and the verify-support feature if Clang-verify is supported
if self._supportsVerify(test, litConfig):
test.config.available_features.add('verify-support')
substitutions.append(('%{verify}', '-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0'))
# Parse the test file, including custom directives
additionalCompileFlags = []
fileDependencies = fileDependencies or []
parsers = [
lit.TestRunner.IntegratedTestKeywordParser('FILE_DEPENDENCIES:',
lit.TestRunner.ParserKind.LIST,
initial_value=fileDependencies),
lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS:',
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags)
]
script = list(steps)
parsed = lit.TestRunner.parseIntegratedTestScript(test, additional_parsers=parsers,
require_script=not script)
if isinstance(parsed, lit.Test.Result):
return parsed
script += parsed
# Add compile flags specified with ADDITIONAL_COMPILE_FLAGS.
substitutions = [(s, x + ' ' + ' '.join(additionalCompileFlags)) if s == '%{compile_flags}'
else (s, x) for (s, x) in substitutions]
# Perform substitutions inside FILE_DEPENDENCIES lines (or injected dependencies).
# This allows using variables like %t in file dependencies. Also note that we really
# need to resolve %{file_dependencies} now, because otherwise we won't be able to
# make all paths absolute below.
fileDependencies = lit.TestRunner.applySubstitutions(fileDependencies, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
# Add the %{file_dependencies} substitution before we perform substitutions
# inside the script.
testDir = os.path.dirname(test.getSourcePath())
fileDependencies = [f if os.path.isabs(f) else os.path.join(testDir, f) for f in fileDependencies]
substitutions.append(('%{file_dependencies}', ' '.join(map(pipes.quote, fileDependencies))))
# Perform substitution in the script itself.
script = lit.TestRunner.applySubstitutions(script, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
if litConfig.noExecute:
return lit.Test.Result(lit.Test.PASS)
else:
return lit.TestRunner._runShTest(test, litConfig, useExternalSh, script, tmpBase)
| [
[
[
370,
373
],
[
455,
458
],
[
6531,
6534
],
[
7105,
7108
],
[
8246,
8249
],
[
8262,
8265
],
[
11299,
11302
],
[
11315,
11318
],
[
11887,
11890
],
[
11903,
11906
],
[
12015,
12018
],
[
12102,
12105
],
[
13007,
13010
],
[
13127,
13130
],
[
13259,
13262
],
[
13386,
13389
],
[
13568,
13571
],
[
13764,
13767
],
[
14419,
14422
],
[
15049,
15052
],
[
15262,
15265
],
[
15278,
15281
],
[
15326,
15329
]
],
[
[
381,
383
],
[
6143,
6145
],
[
6344,
6346
],
[
6398,
6400
],
[
7308,
7310
],
[
14732,
14734
],
[
14803,
14805
],
[
14825,
14827
]
],
[
[
391,
396
],
[
14944,
14949
]
],
[
[
404,
406
],
[
6447,
6449
],
[
8347,
8349
]
],
[
[
414,
424
],
[
7342,
7352
]
],
[
[
432,
454
]
]
] |
# -*- coding: utf-8 -*-
# Scrapy settings for innerwest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Googlebot'
SPIDER_MODULES = ['innerwest.spiders']
NEWSPIDER_MODULE = 'innerwest.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'innerwest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'innerwest.middlewares.InnerwestSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_selenium.SeleniumMiddleware': 800,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'innerwest.pipelines.InnerwestPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
SELENIUM_DRIVER_NAME = 'chrome'
SELENIUM_DRIVER_EXECUTABLE_PATH = "../chromedriver"
SELENIUM_DRIVER_ARGUMENTS=['--headless']
#SELENIUM_DRIVER_ARGUMENTS=[]
FEED_EXPORT_ENCODING = 'utf-8' | [
[
[
424,
432
]
],
[
[
448,
462
]
],
[
[
487,
503
]
],
[
[
689,
703
]
],
[
[
1784,
1806
]
],
[
[
3097,
3117
]
],
[
[
3129,
3160
]
],
[
[
3181,
3206
]
],
[
[
3252,
3272
]
]
] |
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2021 ETH Zurich, Nikita Rudin
from time import time
import numpy as np
import os
from isaacgym.torch_utils import *
from isaacgym import gymtorch, gymapi, gymutil
import torch
# from torch.tensor import Tensor
from typing import Tuple, Dict
from legged_gym.envs import LeggedRobot
from legged_gym import LEGGED_GYM_ROOT_DIR
from .mixed_terrains.anymal_c_rough_config import AnymalCRoughCfg
class Anymal(LeggedRobot):
cfg : AnymalCRoughCfg
def __init__(self, cfg, sim_params, physics_engine, sim_device, headless):
super().__init__(cfg, sim_params, physics_engine, sim_device, headless)
# load actuator network
if self.cfg.control.use_actuator_network:
actuator_network_path = self.cfg.control.actuator_net_file.format(LEGGED_GYM_ROOT_DIR=LEGGED_GYM_ROOT_DIR)
self.actuator_network = torch.jit.load(actuator_network_path).to(self.device)
def reset_idx(self, env_ids):
super().reset_idx(env_ids)
# Additionaly empty actuator network hidden states
self.sea_hidden_state_per_env[:, env_ids] = 0.
self.sea_cell_state_per_env[:, env_ids] = 0.
def _init_buffers(self):
super()._init_buffers()
# Additionally initialize actuator network hidden state tensors
self.sea_input = torch.zeros(self.num_envs*self.num_actions, 1, 2, device=self.device, requires_grad=False)
self.sea_hidden_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)
self.sea_cell_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)
self.sea_hidden_state_per_env = self.sea_hidden_state.view(2, self.num_envs, self.num_actions, 8)
self.sea_cell_state_per_env = self.sea_cell_state.view(2, self.num_envs, self.num_actions, 8)
def _compute_torques(self, actions):
# Choose between pd controller and actuator network
if self.cfg.control.use_actuator_network:
with torch.inference_mode():
self.sea_input[:, 0, 0] = (actions * self.cfg.control.action_scale + self.default_dof_pos - self.dof_pos).flatten()
self.sea_input[:, 0, 1] = self.dof_vel.flatten()
torques, (self.sea_hidden_state[:], self.sea_cell_state[:]) = self.actuator_network(self.sea_input, (self.sea_hidden_state, self.sea_cell_state))
return torques
else:
# pd controller
return super()._compute_torques(actions) | [
[
[
1680,
1684
]
],
[
[
1692,
1703
]
],
[
[
1711,
1713
]
],
[
[
1748,
1749
]
],
[
[
1771,
1779
]
],
[
[
1781,
1787
]
],
[
[
1789,
1796
]
],
[
[
1805,
1810
],
[
2477,
2482
],
[
2931,
2936
],
[
3054,
3059
],
[
3175,
3180
],
[
3643,
3648
]
],
[
[
1864,
1869
]
],
[
[
1871,
1875
]
],
[
[
1905,
1916
],
[
2040,
2051
]
],
[
[
1940,
1959
],
[
2420,
2439
]
],
[
[
2010,
2025
],
[
2064,
2079
]
],
[
[
2033,
2039
]
]
] |
class RNNConfig(object):
"""
Holds logistic regression model hyperparams.
:param height: image height
:type heights: int
:param width: image width
:type width: int
:param channels: image channels
:type channels: int
:param batch_size: batch size for training
:type batch_size: int
:param epochs: number of epochs
:type epochs: int
:param save_step: when step % save_step == 0, the model
parameters are saved.
:type save_step: int
:param learning_rate: learning rate for the optimizer
:type learning_rate: float
:param momentum: momentum param
:type momentum: float
"""
def __init__(self,
vocab_size=25000,
batch_size=32,
embedding_dim=100,
rnn_dim=100,
output_dim=2,
layers=1,
epochs=8,
learning_rate=0.01,
momentum=0.2,
bidirectional=False,
opt="sgd",
drop=0):
self.vocab_size = vocab_size
self.batch_size = batch_size
self.embedding_dim = embedding_dim
self.rnn_dim = rnn_dim
self.layers = layers
self.output_dim = output_dim
self.epochs = epochs
self.learning_rate = learning_rate
self.momentum = momentum
self.bidirectional = bidirectional
self.opt = opt
self.drop = drop
def __str__(self):
"""
Get all attributs values.
:return: all hyperparams as a string
:rtype: str
"""
status = "vocab_size = {}\n".format(self.vocab_size)
status += "batch_size = {}\n".format(self.batch_size)
status += "embedding_dim = {}\n".format(self.embedding_dim)
status += "rnn_dim = {}\n".format(self.rnn_dim)
status += "layers = {}\n".format(self.layers)
status += "output_dim = {}\n".format(self.output_dim)
status += "epochs = {}\n".format(self.epochs)
status += "learning_rate = {}\n".format(self.learning_rate)
status += "momentum = {}\n".format(self.momentum)
status += "bidirectional = {}\n".format(self.bidirectional)
status += "opt = {}\n".format(self.opt)
status += "drop = {}\n".format(self.drop)
return status
| [
[
[
6,
15
]
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.