text (stringlengths 0 to 1.05M) | meta (dict)
---|---|
# $Id: TemplateExpand.py 1053 2008-07-30 12:03:29Z landrgr1 $
#
# Created by Greg Landrum August, 2006
#
#
from __future__ import print_function
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import AllChem
from rdkit.Chem.ChemUtils.AlignDepict import AlignDepict
import sys
_version = "0.8.0"
_greet = "This is TemplateExpand version %s" % _version
_usage = """
Usage: TemplateExpand [options] template <sidechains>
Unless otherwise indicated, the template and sidechains are assumed to be
Smiles
Each sidechain entry should be:
[-r] SMARTS filename
The SMARTS pattern is used to recognize the attachment point;
if the -r argument is not provided, atoms matching the pattern
will be removed from the sidechains.
or
-n filename
where the attachment atom is the first atom in each molecule
The filename provides the list of potential sidechains.
options:
-o filename.sdf: provides the name of the output file, otherwise
stdout is used
--sdf : expect the sidechains to be in SD files
--moltemplate: the template(s) are in a mol/SD file, new depiction(s)
will not be generated unless the --redraw argument is also
provided
--smilesFileTemplate: extract the template(s) from a SMILES file instead of
expecting SMILES on the command line.
--redraw: generate a new depiction for the molecular template(s)
--useall:
or
--useallmatches: generate a product for each possible match of the attachment
pattern to each sidechain. If this is not provided, the first
match (not canonically defined) will be used.
--force: by default, the program prompts the user if the library is
going to contain more than 1000 compounds. This argument
disables the prompt.
--templateSmarts="smarts": provides a space-delimited list containing the SMARTS
patterns to be used to recognize attachment points in
the template
--autoNames: when set, this toggle causes the resulting compounds to be named
based on their sequence id in the file, e.g.
"TemplateEnum: Mol_1", "TemplateEnum: Mol_2", etc.
otherwise the names of the template and building blocks (from
the input files) will be combined to form a name for each
product molecule.
--3D : Generate 3d coordinates for the product molecules instead of 2d coordinates,
requires the --moltemplate option
--tether : refine the 3d conformations using a tethered minimization
"""
def Usage():
print(_usage, file=sys.stderr)
sys.exit(-1)
#pylint: disable=C0111,C0103,C0322,C0324,C0323
nDumped = 0
def _exploder(mol, depth, sidechains, core, chainIndices, autoNames=True, templateName='',
resetCounter=True, do3D=False, useTethers=False):
global nDumped
if resetCounter:
nDumped = 0
ourChains = sidechains[depth]
patt = '[%d*]' % (depth + 1)
patt = Chem.MolFromSmiles(patt)
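# By convention, attachment point N in the template is marked with the
# isotope-labeled dummy atom [N*]; e.g. at depth 0 the query built above is '[1*]'.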
for i, (chainIdx, chain) in enumerate(ourChains):
tchain = chainIndices[:]
tchain.append((i, chainIdx))
rs = Chem.ReplaceSubstructs(mol, patt, chain, replaceAll=True)
if rs:
r = rs[0]
if depth < len(sidechains) - 1:
for entry in _exploder(r, depth + 1, sidechains, core, tchain, autoNames=autoNames,
templateName=templateName, resetCounter=0, do3D=do3D,
useTethers=useTethers):
yield entry
else:
try:
Chem.SanitizeMol(r)
except ValueError:
import traceback
traceback.print_exc()
continue
if not do3D:
if r.HasSubstructMatch(core):
try:
AlignDepict(r, core)
except Exception:
import traceback
traceback.print_exc()
print(Chem.MolToSmiles(r), file=sys.stderr)
else:
print('>>> no match', file=sys.stderr)
AllChem.Compute2DCoords(r)
else:
r = Chem.AddHs(r)
AllChem.ConstrainedEmbed(r, core, useTethers)
Chem.Kekulize(r)
if autoNames:
tName = "TemplateEnum: Mol_%d" % (nDumped + 1)
else:
tName = templateName
for bbI, bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
tName += '_' + bbNm
r.SetProp("_Name", tName)
r.SetProp('seq_num', str(nDumped + 1))
r.SetProp('reagent_indices', '_'.join([str(x[1]) for x in tchain]))
for bbI, bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
r.SetProp('building_block_%d' % (bbI + 1), bbNm)
r.SetIntProp('_idx_building_block_%d' % (bbI + 1), bb[1])
for propN in bbMol.GetPropNames():
r.SetProp('building_block_%d_%s' % (bbI + 1, propN), bbMol.GetProp(propN))
nDumped += 1
if not nDumped % 100:
logger.info('Done %d molecules' % nDumped)
yield r
def Explode(template, sidechains, outF, autoNames=True, do3D=False, useTethers=False):
chainIndices = []
core = Chem.DeleteSubstructs(template, Chem.MolFromSmiles('[*]'))
try:
templateName = template.GetProp('_Name')
except KeyError:
templateName = "template"
for mol in _exploder(template, 0, sidechains, core, chainIndices, autoNames=autoNames,
templateName=templateName, do3D=do3D, useTethers=useTethers):
outF.write(Chem.MolToMolBlock(mol))
for pN in mol.GetPropNames():
print('> <%s>\n%s\n' % (pN, mol.GetProp(pN)), file=outF)
print('$$$$', file=outF)
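# Note: the loop above writes the SD records by hand. A roughly equivalent
# sketch (not the original code path) using RDKit's SDWriter, which emits the
# property blocks automatically, would be:
#
#   w = Chem.SDWriter(outF)
#   for mol in _exploder(...):
#       w.write(mol)
#   w.close()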
def MoveDummyNeighborsToBeginning(mol, useAll=False):
dummyPatt = Chem.MolFromSmiles('[*]')
matches = mol.GetSubstructMatches(dummyPatt)
res = []
for match in matches:
matchIdx = match[0]
smi = Chem.MolToSmiles(mol, True, rootedAtAtom=matchIdx)
entry = Chem.MolFromSmiles(smi)
# entry now has [*] as atom 0 and the neighbor
# as atom 1. Cleave the [*]:
entry = Chem.DeleteSubstructs(entry, dummyPatt)
for propN in mol.GetPropNames():
entry.SetProp(propN, mol.GetProp(propN))
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
res.append(entry)
if not useAll:
break
return res
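# Illustrative example (hypothetical input): for a sidechain written as
# 'OC[*]', rooting the SMILES at the dummy yields '[*]CO'; deleting the dummy
# then leaves the attachment carbon at atom index 0, as the code above expects.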
def ConstructSidechains(suppl, sma=None, replace=True, useAll=False):
if sma:
patt = Chem.MolFromSmarts(sma)
if patt is None:
logger.error('could not construct pattern from smarts: %s' % sma, exc_info=True)
return None
else:
patt = None
if replace:
replacement = Chem.MolFromSmiles('[*]')
res = []
for idx, mol in enumerate(suppl):
if not mol:
continue
if patt:
if not mol.HasSubstructMatch(patt):
logger.warning(
'The substructure pattern did not match sidechain %d. This may result in errors.' %
(idx + 1))
if replace:
tmp = list(Chem.ReplaceSubstructs(mol, patt, replacement))
if not useAll:
tmp = [tmp[0]]
for i, entry in enumerate(tmp):
entry = MoveDummyNeighborsToBeginning(entry)
if not entry:
continue
entry = entry[0]
for propN in mol.GetPropNames():
entry.SetProp(propN, mol.GetProp(propN))
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp[i] = (idx + 1, entry)
else:
# no replacement, use the pattern to reorder
# atoms only:
matches = mol.GetSubstructMatches(patt)
if matches:
tmp = []
for match in matches:
smi = Chem.MolToSmiles(mol, True, rootedAtAtom=match[0])
entry = Chem.MolFromSmiles(smi)
for propN in mol.GetPropNames():
entry.SetProp(propN, mol.GetProp(propN))
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp.append((idx + 1, entry))
else:
tmp = None
else:
tmp = [(idx + 1, mol)]
if tmp:
res.extend(tmp)
return res
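# Each entry in the returned list is a (1-based input index, prepared molecule)
# pair; e.g. a three-molecule supplier with useAll=False yields something like
# [(1, mol1), (2, mol2), (3, mol3)].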
if __name__ == '__main__':
import getopt
print(_greet, file=sys.stderr)
try:
args, extras = getopt.getopt(sys.argv[1:], 'o:h', [
'sdf',
'moltemplate',
'molTemplate',
'smilesFileTemplate',
'templateSmarts=',
'redraw',
'force',
'useall',
'useallmatches',
'autoNames',
'3D',
'3d',
'tethers',
'tether',
])
except Exception:
import traceback
traceback.print_exc()
Usage()
if len(extras) < 3:
Usage()
tooLong = 1000
sdLigands = False
molTemplate = False
redrawTemplate = False
outF = None
forceIt = False
useAll = False
templateSmarts = []
smilesFileTemplate = False
autoNames = False
do3D = False
useTethers = False
for arg, val in args:
if arg == '-o':
outF = val
elif arg == '--sdf':
sdLigands = True
elif arg in ('--moltemplate', '--molTemplate'):
molTemplate = True
elif arg == '--smilesFileTemplate':
smilesFileTemplate = True
elif arg == '--templateSmarts':
templateSmarts = val
elif arg == '--redraw':
redrawTemplate = True
elif arg == '--force':
forceIt = True
elif arg == '--autoNames':
autoNames = True
elif arg in ('--useall', '--useallmatches'):
useAll = True
elif arg in ('--3D', '--3d'):
do3D = True
elif arg in ('--tethers', '--tether'):
useTethers = True
elif arg == '-h':
Usage()
sys.exit(0)
if do3D:
if not molTemplate:
raise ValueError('the --3D option is only useable in combination with --moltemplate')
if redrawTemplate:
logger.warning(
'--redraw does not make sense in combination with --moltemplate. removing it')
redrawTemplate = False
if templateSmarts:
splitL = templateSmarts.split(' ') #pylint: disable=E1103
templateSmarts = []
for i, sma in enumerate(splitL):
patt = Chem.MolFromSmarts(sma)
if not patt:
raise ValueError('could not convert smarts "%s" to a query' % sma)
if i >= 4:
i += 1
replace = Chem.MolFromSmiles('[%d*]' % (i + 1))
templateSmarts.append((patt, replace))
if molTemplate:
removeHs = not do3D
try:
s = Chem.SDMolSupplier(extras[0], removeHs=removeHs)
templates = [x for x in s]
except Exception:
logger.error('Could not construct templates from input file: %s' % extras[0], exc_info=True)
sys.exit(1)
if redrawTemplate:
for template in templates:
AllChem.Compute2DCoords(template)
else:
if not smilesFileTemplate:
try:
templates = [Chem.MolFromSmiles(extras[0])]
except Exception:
logger.error('Could not construct template from smiles: %s' % extras[0], exc_info=True)
sys.exit(1)
else:
try:
s = Chem.SmilesMolSupplier(extras[0], titleLine=False)
templates = [x for x in s]
except Exception:
logger.error('Could not construct templates from input file: %s' % extras[0], exc_info=True)
sys.exit(1)
for template in templates:
AllChem.Compute2DCoords(template)
if templateSmarts:
finalTs = []
for i, template in enumerate(templates):
for j, (patt, replace) in enumerate(templateSmarts):
if not template.HasSubstructMatch(patt):
logger.error('template %d did not match sidechain pattern %d, skipping it' %
(i + 1, j + 1))
template = None
break
template = Chem.ReplaceSubstructs(template, patt, replace)[0]
if template:
Chem.SanitizeMol(template)
finalTs.append(template)
templates = finalTs
sidechains = []
pos = 1
while pos < len(extras):
if extras[pos] == '-r':
replaceIt = False
pos += 1
else:
replaceIt = True
if extras[pos] == '-n':
sma = None
else:
sma = extras[pos]
pos += 1
try:
dat = extras[pos]
except IndexError:
logger.error('missing a sidechain filename')
sys.exit(-1)
pos += 1
if sdLigands:
try:
suppl = Chem.SDMolSupplier(dat)
except Exception:
logger.error('could not construct supplier from SD file: %s' % dat, exc_info=True)
suppl = []
else:
tmpF = open(dat, 'r')
inL = tmpF.readline()
if len(inL.split(' ')) < 2:
nmCol = -1
else:
nmCol = 1
try:
suppl = Chem.SmilesMolSupplier(dat, nameColumn=nmCol)
except Exception:
logger.error('could not construct supplier from smiles file: %s' % dat, exc_info=True)
suppl = []
suppl = [x for x in suppl]
chains = ConstructSidechains(suppl, sma=sma, replace=replaceIt, useAll=useAll)
if chains:
sidechains.append(chains)
count = 1
for chain in sidechains:
count *= len(chain)
count *= len(templates)
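# e.g. sidechain lists of sizes 3 and 4 combined with 2 templates
# give 3 * 4 * 2 = 24 products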
if not sidechains or not count:
print("No molecules to be generated.", file=sys.stderr)
sys.exit(0)
if not forceIt and count > tooLong:
print("This will generate %d molecules." % count, file=sys.stderr)
print("Continue anyway? [no] ", file=sys.stderr, end='')
sys.stderr.flush()
ans = sys.stdin.readline().strip()
if ans not in ('y', 'yes', 'Y', 'YES'):
sys.exit(0)
if outF and outF != "-":
try:
outF = open(outF, 'w+')
except IOError:
logger.error('could not open file %s for writing' % (outF), exc_info=True)
else:
outF = sys.stdout
for template in templates:
Explode(template, sidechains, outF, autoNames=autoNames, do3D=do3D, useTethers=useTethers)
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/ChemUtils/TemplateExpand.py",
"copies": "1",
"size": "14480",
"license": "bsd-3-clause",
"hash": 5554955400484881000,
"line_mean": 30.9646799117,
"line_max": 101,
"alpha_frac": 0.5944060773,
"autogenerated": false,
"ratio": 3.5885997521685256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9586001671329201,
"avg_score": 0.019400831627864986,
"num_lines": 453
} |
# $Id: TemplateExpand.py 1053 2008-07-30 12:03:29Z landrgr1 $
#
# Created by Greg Landrum August, 2006
#
#
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import AllChem
from rdkit.Chem.ChemUtils.AlignDepict import AlignDepict
import sys
_version="0.8.0"
_greet="This is TemplateExpand version %s"%_version
_usage="""
Usage: TemplateExpand [options] template <sidechains>
Unless otherwise indicated, the template and sidechains are assumed to be
Smiles
Each sidechain entry should be:
[-r] SMARTS filename
The SMARTS pattern is used to recognize the attachment point,
if the -r argument is not provided, then atoms matching the pattern
will be removed from the sidechains.
or
-n filename
where the attachment atom is the first atom in each molecule
The filename provides the list of potential sidechains.
options:
-o filename.sdf: provides the name of the output file, otherwise
stdout is used
--sdf : expect the sidechains to be in SD files
--moltemplate: the template(s) are in a mol/SD file, new depiction(s)
will not be generated unless the --redraw argument is also
provided
--smilesFileTemplate: extract the template(s) from a SMILES file instead of
expecting SMILES on the command line.
--redraw: generate a new depiction for the molecular template(s)
--useall:
or
--useallmatches: generate a product for each possible match of the attachment
pattern to each sidechain. If this is not provided, the first
match (not canonically defined) will be used.
--force: by default, the program prompts the user if the library is
going to contain more than 1000 compounds. This argument
disables the prompt.
--templateSmarts="smarts": provides a space-delimited list containing the SMARTS
patterns to be used to recognize attachment points in
the template
--autoNames: when set this toggle causes the resulting compounds to be named
based on their sequence id in the file, e.g.
"TemplateEnum: Mol_1", "TemplateEnum: Mol_2", etc.
otherwise the names of the template and building blocks (from
the input files) will be combined to form a name for each
product molecule.
--3D : Generate 3d coordinates for the product molecules instead of 2d coordinates,
requires the --moltemplate option
--tether : refine the 3d conformations using a tethered minimization
"""
def Usage():
import sys
print >>sys.stderr,_usage
sys.exit(-1)
nDumped=0
def _exploder(mol,depth,sidechains,core,chainIndices,autoNames=True,templateName='',
resetCounter=True,do3D=False,useTethers=False):
global nDumped
if resetCounter:
nDumped=0
ourChains = sidechains[depth]
patt = '[%d*]'%(depth+1)
patt = Chem.MolFromSmiles(patt)
for i,(chainIdx,chain) in enumerate(ourChains):
tchain = chainIndices[:]
tchain.append((i,chainIdx))
rs = Chem.ReplaceSubstructs(mol,patt,chain,replaceAll=True)
if rs:
r = rs[0]
if depth<len(sidechains)-1:
for entry in _exploder(r,depth+1,sidechains,core,tchain,
autoNames=autoNames,templateName=templateName,
resetCounter=0,do3D=do3D,useTethers=useTethers):
yield entry
else:
try:
Chem.SanitizeMol(r)
except ValueError:
import traceback
traceback.print_exc()
continue
if not do3D:
if r.HasSubstructMatch(core):
try:
AlignDepict(r,core)
except:
import traceback
traceback.print_exc()
print >>sys.stderr,Chem.MolToSmiles(r)
else:
print >>sys.stderr,'>>> no match'
AllChem.Compute2DCoords(r)
else:
r = Chem.AddHs(r)
AllChem.ConstrainedEmbed(r,core,useTethers)
Chem.Kekulize(r)
if autoNames:
tName = "TemplateEnum: Mol_%d"%(nDumped+1)
else:
tName = templateName
for bbI,bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
tName += '_' + bbNm
r.SetProp("_Name",tName)
r.SetProp('seq_num',str(nDumped+1))
r.SetProp('reagent_indices','_'.join([str(x[1]) for x in tchain]))
for bbI,bb in enumerate(tchain):
bbMol = sidechains[bbI][bb[0]][1]
if bbMol.HasProp('_Name'):
bbNm = bbMol.GetProp('_Name')
else:
bbNm = str(bb[1])
r.SetProp('building_block_%d'%(bbI+1),bbNm)
for propN in bbMol.GetPropNames():
r.SetProp('building_block_%d_%s'%(bbI+1,propN),bbMol.GetProp(propN))
nDumped += 1
if not nDumped%100:
logger.info('Done %d molecules'%nDumped)
yield r
def Explode(template,sidechains,outF,autoNames=True,do3D=False,useTethers=False):
chainIndices=[]
core = Chem.DeleteSubstructs(template,Chem.MolFromSmiles('[*]'))
try:
templateName = template.GetProp('_Name')
except KeyError:
templateName="template"
for mol in _exploder(template,0,sidechains,core,chainIndices,autoNames=autoNames,
templateName=templateName,do3D=do3D,useTethers=useTethers):
outF.write(Chem.MolToMolBlock(mol))
for pN in mol.GetPropNames():
print >>outF,'> <%s>\n%s\n'%(pN,mol.GetProp(pN))
print >>outF,'$$$$'
def MoveDummyNeighborsToBeginning(mol,useAll=False):
dummyPatt=Chem.MolFromSmiles('[*]')
matches = mol.GetSubstructMatches(dummyPatt)
res = []
for match in matches:
matchIdx = match[0]
smi = Chem.MolToSmiles(mol,True,rootedAtAtom=matchIdx)
entry = Chem.MolFromSmiles(smi)
# entry now has [*] as atom 0 and the neighbor
# as atom 1. Cleave the [*]:
entry = Chem.DeleteSubstructs(entry,dummyPatt)
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
res.append(entry)
if not useAll:
break
return res
def ConstructSidechains(suppl,sma=None,replace=True,useAll=False):
if sma:
try:
patt = Chem.MolFromSmarts(sma)
except:
logger.error('could not construct pattern from smarts: %s'%sma,
exc_info=True)
return None
else:
patt = None
if replace:
replacement = Chem.MolFromSmiles('[*]')
res = []
for idx,mol in enumerate(suppl):
if not mol:
continue
if patt:
if not mol.HasSubstructMatch(patt):
logger.warning('The substructure pattern did not match sidechain %d. This may result in errors.'%(idx+1))
if replace:
tmp = list(Chem.ReplaceSubstructs(mol,patt,replacement))
if not useAll: tmp = [tmp[0]]
for i,entry in enumerate(tmp):
entry = MoveDummyNeighborsToBeginning(entry)
if not entry:
continue
entry = entry[0]
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp[i] = (idx+1,entry)
else:
# no replacement, use the pattern to reorder
# atoms only:
matches = mol.GetSubstructMatches(patt)
if matches:
tmp = []
for match in matches:
smi = Chem.MolToSmiles(mol,True,rootedAtAtom=match[0])
entry = Chem.MolFromSmiles(smi)
for propN in mol.GetPropNames():
entry.SetProp(propN,mol.GetProp(propN));
# now we have a molecule with the atom to be joined
# in position zero; Keep that:
tmp.append((idx+1,entry))
else:
tmp = None
else:
tmp = [(idx+1,mol)]
if tmp:
res.extend(tmp)
return res
if __name__=='__main__':
import getopt,sys
print >>sys.stderr,_greet
try:
args,extras = getopt.getopt(sys.argv[1:],'o:h',[
'sdf',
'moltemplate',
'molTemplate',
'smilesFileTemplate',
'templateSmarts=',
'redraw',
'force',
'useall',
'useallmatches',
'autoNames',
'3D','3d',
'tethers',
'tether',
])
except:
import traceback
traceback.print_exc()
Usage()
if len(extras)<3:
Usage()
tooLong=1000
sdLigands=False
molTemplate=False
redrawTemplate=False
outF=None
forceIt=False
useAll=False
templateSmarts=[]
smilesFileTemplate=False
autoNames=False
do3D=False
useTethers=False
for arg,val in args:
if arg=='-o':
outF=val
elif arg=='--sdf':
sdLigands=True
elif arg in ('--moltemplate','--molTemplate'):
molTemplate=True
elif arg=='--smilesFileTemplate':
smilesFileTemplate=True
elif arg=='--templateSmarts':
templateSmarts = val
elif arg=='--redraw':
redrawTemplate=True
elif arg=='--force':
forceIt=True
elif arg=='--autoNames':
autoNames=True
elif arg in ('--useall','--useallmatches'):
useAll=True
elif arg in ('--3D','--3d'):
do3D=True
elif arg in ('--tethers','--tether'):
useTethers=True
elif arg=='-h':
Usage()
sys.exit(0)
if do3D:
if not molTemplate:
raise ValueError,'the --3D option is only useable in combination with --moltemplate'
if redrawTemplate:
logger.warning('--redrawTemplate does not make sense in combination with --molTemplate. removing it')
redrawTemplate=False
if templateSmarts:
splitL = templateSmarts.split(' ')
templateSmarts = []
for i,sma in enumerate(splitL):
patt = Chem.MolFromSmarts(sma)
if not patt:
raise ValueError,'could not convert smarts "%s" to a query'%sma
if i>=4:
i+=1
replace = Chem.MolFromSmiles('[%d*]'%(i+1))
templateSmarts.append((patt,replace))
if molTemplate:
removeHs = not do3D
try:
s = Chem.SDMolSupplier(extras[0],removeHs=removeHs)
templates = [x for x in s]
except:
logger.error('Could not construct templates from input file: %s'%extras[0],
exc_info=True)
sys.exit(1)
if redrawTemplate:
for template in templates:
AllChem.Compute2DCoords(template)
else:
if not smilesFileTemplate:
try:
templates = [Chem.MolFromSmiles(extras[0])]
except:
logger.error('Could not construct template from smiles: %s'%extras[0],
exc_info=True)
sys.exit(1)
else:
try:
s = Chem.SmilesMolSupplier(extras[0],titleLine=False)
templates = [x for x in s]
except:
logger.error('Could not construct templates from input file: %s'%extras[0],
exc_info=True)
sys.exit(1)
for template in templates:
AllChem.Compute2DCoords(template)
if templateSmarts:
finalTs = []
for i,template in enumerate(templates):
for j,(patt,replace) in enumerate(templateSmarts):
if not template.HasSubstructMatch(patt):
logger.error('template %d did not match sidechain pattern %d, skipping it'%(i+1,j+1))
template =None
break
template = Chem.ReplaceSubstructs(template,patt,replace)[0]
if template:
Chem.SanitizeMol(template)
finalTs.append(template)
templates = finalTs
sidechains = []
pos = 1
while pos<len(extras):
if extras[pos]=='-r':
replaceIt=False
pos += 1
else:
replaceIt=True
if extras[pos]=='-n':
sma = None
else:
sma = extras[pos]
pos += 1
try:
dat = extras[pos]
except IndexError:
logger.error('missing a sidechain filename')
sys.exit(-1)
pos += 1
if sdLigands:
try:
suppl = Chem.SDMolSupplier(dat)
except:
logger.error('could not construct supplier from SD file: %s'%dat,
exc_info=True)
suppl = []
else:
tmpF = file(dat,'r')
inL = tmpF.readline()
if len(inL.split(' '))<2:
nmCol=-1
else:
nmCol=1
try:
suppl = Chem.SmilesMolSupplier(dat,nameColumn=nmCol)
except:
logger.error('could not construct supplier from smiles file: %s'%dat,
exc_info=True)
suppl = []
suppl = [x for x in suppl]
chains = ConstructSidechains(suppl,sma=sma,replace=replaceIt,useAll=useAll)
if chains:
sidechains.append(chains)
count = 1
for chain in sidechains:
count *= len(chain)
count *= len(templates)
if not sidechains or not count:
print >>sys.stderr,"No molecules to be generated."
sys.exit(0)
if not forceIt and count>tooLong:
print >>sys.stderr,"This will generate %d molecules."%count
print >>sys.stderr,"Continue anyway? [no] ",
sys.stderr.flush()
ans = sys.stdin.readline().strip()
if ans not in ('y','yes','Y','YES'):
sys.exit(0)
if outF and outF!="-":
try:
outF = file(outF,'w+')
except IOError:
logger.error('could not open file %s for writing'%(outF),
exc_info=True)
else:
outF = sys.stdout
for template in templates:
Explode(template,sidechains,outF,autoNames=autoNames,do3D=do3D,
useTethers=useTethers)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/ChemUtils/TemplateExpand.py",
"copies": "2",
"size": "14044",
"license": "bsd-3-clause",
"hash": 3479630471818914000,
"line_mean": 30.0022075055,
"line_max": 113,
"alpha_frac": 0.597692965,
"autogenerated": false,
"ratio": 3.601025641025641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03848920416003707,
"num_lines": 453
} |
# $Id: TestAll.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys, unittest, logging, re, StringIO, os, subprocess
from os.path import normpath, abspath
sys.path.append(".")
from TestConfig import TestConfig
sys.path.append("..")
from MiscLib import TestUtils
try:
# Running Python 2.5 with simplejson?
import simplejson as json
except ImportError:
import json as json
logger = logging.getLogger("TestAdminInterfaceRestfulAPIs")
class TestAdminInterfaceRestfulAPIs(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Tests
# Test the Admin Interface /user/[AdmiralUserID] API
def testAdmiralModifyUserDetails(self):
url = TestConfig.HostName + "/user/" + TestConfig.UserName
inputJsonFileName = TestConfig.UserName+".json"
curlcommand = "curl " + url + " -u " + TestConfig.RemoteUserName+":"+ TestConfig.RemoteUserPass +" --data-binary @"+inputJsonFileName+" -X PUT "
print curlcommand
cmdOutput = subprocess.Popen(curlcommand, shell=True, stdout=subprocess.PIPE)
expectedOutput = '{"Update": "Successful"}'
print "expectedOutput = " + expectedOutput
actualOutput = cmdOutput.stdout.read()
print "actualOutput = " + actualOutput
assert (actualOutput==expectedOutput), "Actual Response=" + actualOutput +" while Expected response= "+expectedOutput
return
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ #"testUnits"
"testAdmiralModifyUserDetails"
],
"component":
[ #"testComponents"
],
"integration":
[ #"testIntegration"
],
"pending":
[ #"testPending"
]
}
return TestUtils.getTestSuite(TestAdminInterfaceRestfulAPIs, testdict, select=select)
if __name__ == "__main__":
#logging.basicConfig(level=logging.DEBUG)
TestUtils.runTests("TestAdminInterfaceRestfulAPIs.log", getTestSuite, sys.argv)
#runner = unittest.TextTestRunner()
#runner.run(getTestSuite()) | {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "src/AdminUIHandler/tests/TestAdminInterfaceRestfulAPIs.py",
"copies": "2",
"size": "2640",
"license": "mit",
"hash": 1540063455722585600,
"line_mean": 32.0125,
"line_max": 153,
"alpha_frac": 0.6367424242,
"autogenerated": false,
"ratio": 4.16403785488959,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5800780279089589,
"avg_score": null,
"num_lines": null
} |
# $Id: TestAll.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys, unittest, logging, zipfile, re, StringIO, os, cgi
from os.path import normpath, abspath
from rdflib import URIRef
sys.path.append("..")
sys.path.append("../cgi-bin")
try:
# Running Python 2.5 with simplejson?
import simplejson as json
except ImportError:
import json as json
import GetDatasetMetadataHandler, ManifestRDFUtils, SubmitDatasetUtils, TestConfig
from MiscLib import TestUtils
Logger = logging.getLogger("TestGetDatasetMetadataHandler")
class TestGetDatasetMetadataHandler(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Tests
# Test the metadata response returned by the get-dataset-metadata handler
def testGetDatasetMetadataResponse(self):
outputStr = StringIO.StringIO()
# Create a manifest file from mocked up form data
ManifestRDFUtils.writeToManifestFile(TestConfig.ManifestFilePath, TestConfig.NamespaceDictionary,TestConfig.ElementUriList, TestConfig.ElementValueList)
# Invoke get metatadata submission program, passing faked dataset directory
GetDatasetMetadataHandler.getDatasetMetadata(TestConfig.formdata, TestConfig.ManifestName, outputStr)
outputStr.seek(0, os.SEEK_SET)
firstLine = outputStr.readline()
self.assertEqual( firstLine, "Content-type: application/JSON\n", "Expected Metadata as application/JSON")
Logger.debug("Output String from output stream: " + outputStr.getvalue())
# Check retrieving metadata
metadata = json.load(outputStr)
Logger.debug("Metadata Length = "+ repr(len(metadata)))
self.assertEquals(len(metadata), 4, "Expected 4 pairs of field-values to be returned")
return
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ #"testUnits"
"testGetDatasetMetadataResponse"
],
"component":
[ #"testComponents"
],
"integration":
[ #"testIntegration"
],
"pending":
[ #"testPending"
]
}
return TestUtils.getTestSuite(TestGetDatasetMetadataHandler, testdict, select=select)
if __name__ == "__main__":
#logging.basicConfig(level=logging.DEBUG)
TestConfig.setDatasetsBaseDir(".")
TestUtils.runTests("TestGetDatasetMetadataHandler.log", getTestSuite, sys.argv) | {
"repo_name": "bhavanaananda/DataStage",
"path": "src/SubmitDatasetHandler/tests/TestGetDatasetMetadataHandler.py",
"copies": "2",
"size": "2943",
"license": "mit",
"hash": -5524040749212299000,
"line_mean": 32.0786516854,
"line_max": 160,
"alpha_frac": 0.6598708801,
"autogenerated": false,
"ratio": 4.36,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.60198708801,
"avg_score": null,
"num_lines": null
} |
# $Id: TestAll.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys, unittest, logging
# Add main library directory to python path
sys.path.append("../..")
import TestFilePrivateArea
import TestFileSharedArea
import TestFileCollabArea
import TestDeletedUserCheckFileAccess
import TestFileDefaultArea
# import TestWebDAVAccess
# import TestWebDAVbyHTTP
# Code to run unit tests from all library test modules
def getTestSuite(select="all"):
suite = unittest.TestSuite()
suite.addTest(TestFilePrivateArea.getTestSuite(select=select))
suite.addTest(TestFileSharedArea.getTestSuite(select=select))
suite.addTest(TestFileCollabArea.getTestSuite(select=select))
suite.addTest(TestDeletedUserCheckFileAccess.getTestSuite(select=select))
suite.addTest(TestFileDefaultArea.getTestSuite(select=select))
# suite.addTest(TestWebDAVAccess.getTestSuite(select=select))
# suite.addTest(TestWebDAVbyHTTP.getTestSuite(select=select))
return suite
from MiscLib import TestUtils
import junitxml
if __name__ == "__main__":
print "============================================================"
print "This test suite needs to run under a Linux operating system"
print "Edit TestConfig.py to specify hostname and other parameters"
print "Create test accounts on target system to match TestConfig.py"
print "============================================================"
if len(sys.argv) >= 2 and sys.argv[1] == "xml":
with open('xmlresults.xml', 'w') as report:
result = junitxml.JUnitXmlResult(report)
result.startTestRun()
getTestSuite().run(result)
result.stopTestRun()
else:
TestUtils.runTests("TestAll", getTestSuite, sys.argv)
# End.
| {
"repo_name": "bhavanaananda/DataStage",
"path": "test/FileShare/tests/TestAll.py",
"copies": "1",
"size": "1864",
"license": "mit",
"hash": -4946280272011135000,
"line_mean": 35.5490196078,
"line_max": 77,
"alpha_frac": 0.688304721,
"autogenerated": false,
"ratio": 3.932489451476793,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5120794172476794,
"avg_score": null,
"num_lines": null
} |
# $Id: TestAll.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys, unittest, logging, zipfile, re
from os.path import normpath
# Add main library directory to python path
sys.path.append("../..")
from MiscLib.ScanFiles import *
class ZipDirectory(unittest.TestCase):
def setUp(self):
self.testpath = "."
self.testpatt = re.compile("^.*$(?<!\.zip)")
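# the negative lookbehind matches any name that does not end in '.zip',
# so existing archives are excluded from the files being zipped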
return
def tearDown(self):
return
def doZip(self):
files = CollectFiles(self.testpath,self.testpatt)
z = zipfile.ZipFile('test.zip','w')
for i in files:
n = joinDirName(i[0], i[1])
z.write(n)
z.close()
z = zipfile.ZipFile('test.zip')
z.testzip()
z.printdir()
z.close()
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(ZipDirectory("doZip"))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
| {
"repo_name": "bhavanaananda/DataStage",
"path": "test/ZipTest/tests/ZipDirectory.py",
"copies": "2",
"size": "1078",
"license": "mit",
"hash": 2145389650007453000,
"line_mean": 24.0697674419,
"line_max": 60,
"alpha_frac": 0.6103896104,
"autogenerated": false,
"ratio": 3.47741935483871,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006186522167383034,
"num_lines": 43
} |
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import unittest
sys.path.append("../..")
from MiscUtils.Combinators import *
class TestCombinators(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testApply(self):
# Is function application like BCPL? (fn can be a variable)
def ap(f,v): return f(v)
def inc(n): return n+1
assert ap(inc,2)==3
def testCurry(self):
def f(a,b,c): return a+b+c
g = curry(f,1,2)
assert g(3) == 6
def testCompose(self):
def f(a,b,c): return a+b+c
def g(a,b): return a*b
h = compose(f,g,1000,200)
assert h(3,4) == 1212, "h(3,4) is "+str(h(3,4))
# Code to run unit tests directly from command line.
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCombinators("testApply"))
suite.addTest(TestCombinators("testCurry"))
suite.addTest(TestCombinators("testCompose"))
return suite
if __name__ == "__main__":
# unittest.main()
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
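# For reference, minimal definitions consistent with the tests above (a sketch,
# not the actual MiscUtils.Combinators source):
#
#   def curry(f, *args):
#       def curried(*more):
#           return f(*(args + more))
#       return curried
#
#   def compose(f, g, *args):
#       def composed(*more):
#           return f(g(*more), *args)
#       return composed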
| {
"repo_name": "wf4ever/ro-manager",
"path": "src/MiscUtils/tests/TestCombinators.py",
"copies": "1",
"size": "1447",
"license": "mit",
"hash": 8561528178223112000,
"line_mean": 25.3090909091,
"line_max": 75,
"alpha_frac": 0.6233586731,
"autogenerated": false,
"ratio": 3.1593886462882095,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9207847605407908,
"avg_score": 0.014979942796060262,
"num_lines": 55
} |
# $Id: TestCombinators.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library combinators
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys
import unittest
sys.path.append("../..")
from MiscLib.Combinators import *
class TestCombinators(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testApply(self):
# Is function application like BCPL? (fn can be a variable)
def ap(f,v): return f(v)
def inc(n): return n+1
assert ap(inc,2)==3
def testCurry(self):
def f(a,b,c): return a+b+c
g = curry(f,1,2)
assert g(3) == 6
def testCompose(self):
def f(a,b,c): return a+b+c
def g(a,b): return a*b
h = compose(f,g,1000,200)
assert h(3,4) == 1212, "h(3,4) is "+str(h(3,4))
# Code to run unit tests directly from command line.
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCombinators("testApply"))
suite.addTest(TestCombinators("testCurry"))
suite.addTest(TestCombinators("testCompose"))
return suite
if __name__ == "__main__":
# unittest.main()
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
| {
"repo_name": "bhavanaananda/DataStage",
"path": "src/AdminUIHandler/MiscLib/tests/TestCombinators.py",
"copies": "8",
"size": "1261",
"license": "mit",
"hash": 4510261431196569600,
"line_mean": 23.7254901961,
"line_max": 68,
"alpha_frac": 0.6201427439,
"autogenerated": false,
"ratio": 3.208651399491094,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7828794143391095,
"avg_score": null,
"num_lines": null
} |
# $Id: TestFileCIFSwriteHTTPread.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for TestFileCIFSwriteHTTPread module
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
import TestHttpUtils
class TestFileCIFSwriteHTTPread(unittest.TestCase):
def do_HTTP_redirect(self, opener, method, uri, data, content_type):
return TestHttpUtils.do_HTTP_redirect(opener, method, uri, data, content_type)
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testSharedUserCIFS(self):
mountcommand = ( '/sbin/mount.cifs //%(host)s/%(share)s/ %(mountpt)s -o rw,user=%(user)s,password=%(pass)s,nounix,forcedirectio' %
{ 'host': TestConfig.hostname
, 'share': TestConfig.cifssharename
, 'userA': TestConfig.userAname
, 'user': TestConfig.userAname
, 'mountpt': TestConfig.cifsmountpoint
, 'pass': TestConfig.userApass
} )
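# e.g. this expands to something like (host and credentials are illustrative):
#   /sbin/mount.cifs //datastage.example.org/data/ /mnt/testcifs -o rw,user=TestUserA,password=...,nounix,forcedirectio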
status=os.system(mountcommand)
self.assertEqual(status, 0, 'CIFS Mount failure')
f = open(TestConfig.cifsmountpoint+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(TestConfig.cifsmountpoint+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content by user A in shared space')
f = open(TestConfig.cifsmountpoint+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(TestConfig.cifsmountpoint+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content by user A')
os.system('/sbin/umount.cifs '+TestConfig.cifsmountpoint)
def testSharedUserHTTPB(self):
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.userBname, TestConfig.userBpass)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
createstring="Test creation of file\n"
modifystring="And this is after an update"
disallowed = False
message = self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAspace.tmp',
modifystring, 'text/plain')
if message[0] == 401:
disallowed = True
assert disallowed, "User B can create file in User A's area by HTTP! " + str(message)
#print "URI: "+TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp'
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
thepage = pagehandle.read()
self.assertEqual(thepage, createstring)
disallowed = False
message = self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp',
modifystring, 'text/plain')
if message[0] == 401:
disallowed = True
assert disallowed, "User B can update file in User A's shared area by HTTP! " + str(message)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp')
thepage = pagehandle.read()
self.assertEqual(thepage, createstring)
disallowed = False
message = self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp',
modifystring, 'text/plain')
if message[0] == 401:
disallowed = True
assert disallowed, "User B can update file in User A's collab area by HTTP! " + str(message)
def testSharedUserHTTPRGLeader(self):
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
createstring="Test creation of file\n"
modifystring="And this is after an update"
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
thepage = pagehandle.read()
self.assertEqual(thepage, createstring)
disallowed = False
try:
self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp',
modifystring, 'text/plain')
phan=urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
thepage=phan.read()
self.assertEqual(thepage,modifystring)
except urllib2.HTTPError as e:
self.assertEqual(e.code, 401, "Operation should be 401 (auth failed), was: "+str(e))
disallowed = True
assert disallowed, "Group leader can update file in User A's shared area by HTTP!"
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp')
thepage = pagehandle.read()
self.assertEqual(thepage, createstring)
disallowed = False
message = self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp',
modifystring, 'text/plain')
if message[0] == 401:
disallowed = True
assert disallowed,"Group leader can update file in User A's collab area by HTTP! " + str(message)
def testSharedUserHTTPCollab(self):
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.collabname, TestConfig.collabpass)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
createstring="Test creation of file\n"
modifystring="And this is after an update"
thepage=None
try:
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
thepage = pagehandle.read()
self.assertEqual(thepage, createstring)
except:
pass
assert (thepage==None), "Collaborator can read file in User A's shared area by HTTP!"
disallowed = False
message = self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp',
modifystring, 'text/plain')
if message[0] == 401:
disallowed = True
assert disallowed,"Collaborator can update file in User A's shared area by HTTP! " + str(message)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp')
thepage = pagehandle.read()
self.assertEqual(thepage, createstring)
disallowed = False
message = self.do_HTTP_redirect(opener, "PUT",
TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp',
modifystring, 'text/plain')
if message[0] == 401:
disallowed = True
assert disallowed, "Collaborator can update file in User A's collab area by HTTP! " + str(message)
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testSharedUserCIFS"
, "testSharedUserHTTPB"
, "testSharedUserHTTPRGLeader"
, "testSharedUserHTTPCollab"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testReadMeSSH"
, "testReadMeDAVfs"
, "testCreateFileDAVfs"
, "testUpdateFileDAVfs"
, "testDeleteFileDAVfs"
, "testDeleteFileCIFS"
, "testDeleteFileHTTP"
]
}
return TestUtils.getTestSuite(TestFileCIFSwriteHTTPread, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileCIFSwriteHTTPread", getTestSuite, sys.argv)
# End.
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/FileShare/tests/TestFileCIFSwriteHTTPread.py",
"copies": "2",
"size": "11091",
"license": "mit",
"hash": 6368491128259084000,
"line_mean": 40.5393258427,
"line_max": 138,
"alpha_frac": 0.6344784059,
"autogenerated": false,
"ratio": 4.0404371584699454,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5674915564369946,
"avg_score": null,
"num_lines": null
} |
# $Id: TestFilecollabArea.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for access to collab file area
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
import TestCifsUtils
import TestHttpUtils
class TestFileCollabArea(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def HTTP_redirect(self, opener, method, uri, data, content_type):
TestHttpUtils.do_HTTP_redirect(opener, method, uri, data, content_type)
return
def cifsMountAs(self, userArea, userName, userPass):
status= TestCifsUtils.do_cifsMount('collab/'+userArea, userName, userPass)
if status != 8192:
self.assertEqual(status, 0, 'CIFS Mount failure')
return status
def cifsMount(self, userName, userPass):
self.cifsMountAs(userName, userName, userPass)
return
def cifsUnmount(self):
TestCifsUtils.do_cifsUnmount()
return
def cifsCreateFile(self, fileName, createFileContent):
TestCifsUtils.do_cifsCreateFile(fileName, createFileContent)
return createFileContent
def cifsReadFile(self, fileName ):
readFileContent = TestCifsUtils.do_cifsReadFile(fileName)
return readFileContent
def cifsUpdateFile(self,fileName, updateFileContent):
TestCifsUtils.do_cifsUpdateFile(fileName, updateFileContent)
return updateFileContent
def cifsDeleteFile(self,fileName):
deleteMessage = TestCifsUtils.do_cifsDeleteFile(fileName)
return deleteMessage
def httpAuthenticationHandler(self,userName, userPass):
authhandler = TestHttpUtils.do_httpAuthenticationHandler(userName, userPass)
return authhandler
def httpCreateFileAs(self, areaName, userName, userPass, fileName, fileContent):
createMessage = TestHttpUtils.do_httpCreateFile('collab/'+areaName, userName, userPass, fileName, fileContent)
return createMessage
def httpCreateFile(self, userName, userPass, fileName, fileContent):
createMessage = self.httpCreateFileAs(userName, userName, userPass, fileName, fileContent)
return createMessage
def httpReadFileAs(self, areaName, userName, userPass,fileName):
readFileContent = TestHttpUtils.do_httpReadFile( 'collab/'+areaName, userName, userPass,fileName)
return readFileContent
def httpReadFile(self, userName, userPass,fileName):
readFileContent = self.httpReadFileAs(userName, userName, userPass,fileName)
return readFileContent
def httpUpdateFileAs(self, areaName, userName, userPass,fileName, updateFileContent):
updateMessage = TestHttpUtils.do_httpUpdateFile('collab/'+areaName, userName, userPass,fileName, updateFileContent)
return updateMessage
def httpUpdateFile(self, userName, userPass,fileName, updateFileContent):
updateMessage = self.httpUpdateFileAs(userName, userName, userPass,fileName, updateFileContent)
return updateMessage
def httpDeleteFileAs(self, areaName, userName, userPass,fileName):
deleteMessage = TestHttpUtils.do_httpDeleteFile( 'collab/'+areaName, userName, userPass,fileName)
return deleteMessage
def httpDeleteFile(self, userName, userPass,fileName):
deleteMessage = self.httpDeleteFileAs(userName, userName, userPass,fileName)
return deleteMessage
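# Naming convention for the helpers above: the plain variants act as the named
# user in that user's own collab area, while the *As variants separate the area
# owner (areaName) from the authenticating user; the cross-user tests below
# rely on that distinction.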
def testNull(self):
assert (True), "True expected"
return
# Test User A's access permissions in his own collab area
def testUserACreateCIFSUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
createdFileContent = self.cifsCreateFile(fileName, fileContent)
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(createdFileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateCIFSUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
createdFileContent = self.cifsCreateFile(fileName, fileContent)
readFileContent = self.httpReadFile( TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(createdFileContent,readFileContent)
self.cifsUnmount()
return
def testUserAUpdateCIFSUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUpdateFile(fileName, fileUpdateContent)
updatedFileContent = fileContent + fileUpdateContent
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(updatedFileContent,readFileContent)
self.cifsUnmount()
return
def testUserAUpdateHTTPUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
updatedFileContent= fileContent + fileUpdateContent
# An HTTP update overwrites the file (it does not append to the original),
# hence we expect the full updated content when reading the file again.
updateMessage = self.httpUpdateFile(TestConfig.userAname, TestConfig.userApass,fileName,updatedFileContent)
self.assertEqual(updateMessage[0],0,"Update file failed: "+str(updateMessage))
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(updatedFileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateCIFSUserADeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsDeleteFile(fileName)
self.cifsUnmount()
return
def testUserACreateCIFSUserADeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
deleteMessage = self.httpDeleteFile(TestConfig.userAname, TestConfig.userApass, fileName)
self.assertEqual(deleteMessage[0],0,"Delete file failed: "+str(deleteMessage))
self.cifsUnmount()
return
def testUserACreateHTTPUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
readFileContent = self.httpReadFile(TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(fileContent,readFileContent)
return
def testUserACreateHTTPUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(fileContent,readFileContent)
self.cifsUnmount()
return
def testUserAUpdateHTTPUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFile(TestConfig.userAname, TestConfig.userApass,fileName,fileUpdateContent)
self.assertEqual(updateMessage[0],0,"Update file failed: "+str(updateMessage))
readFileContent = self.httpReadFile(TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(fileUpdateContent,readFileContent)
return
def testUserAUpdateCIFSUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsUpdateFile(fileName, fileUpdateContent)
updatedFileContent= fileContent + fileUpdateContent
readFileContent = self.httpReadFile(TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(updatedFileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateHTTPUserADeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFile(TestConfig.userAname, TestConfig.userApass, fileName)
self.assertEqual(deleteMessage[0],0,"Delete file failed: "+str(deleteMessage))
return
def testUserACreateHTTPUserADeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsDeleteFile(fileName)
self.cifsUnmount()
return
# Test User B's access permissions on files in User A's collab area
def testUserBCreateCIFSInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
allowed = True
try:
self.cifsCreateFile(fileName, fileContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
allowed = False
assert allowed, "User B is not able to create a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSUserBReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(fileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateCIFSUserBUpdateCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
allowed = True
try:
self.cifsUpdateFile(fileName,fileUpdateContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
allowed = False
assert allowed, "User B is not able to update a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSUserBDeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
deleteMessage = self.cifsDeleteFile(fileName)
self.assertNotEquals(deleteMessage[0], 13,
"Not Expected (13, Permission denied) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
self.cifsUnmount()
return
def testUserBCreateHTTPInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName, fileContent)
#self.assertEqual(createMessage[0], 401, "User B can create a file in User A's filespace by HTTP, got: "+str(createMessage))
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
return
def testUserACreateHTTPUserBReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
readFileContent = self.httpReadFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName)
self.assertEqual(fileContent,readFileContent,"User B cannot read the file in User A's filespace by HTTP")
return
def testUserACreateHTTPUserBUpdateHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName,fileUpdateContent)
self.assertNotEquals(updateMessage[0], 401,
"Not Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(updateMessage))
return
def testUserACreateHTTPUserBDeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName)
#print repr(deleteMessage)
self.assertNotEquals(deleteMessage[0], 401,
"Not Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
return
# Test Collaborator's access permissions on files in User A's Collab area
def testCollabCreateCIFSInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
disallowed = False
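        # Status 8192 is assumed to be the os.system()-encoded exit status of a
        # refused mount (exit code 32 from mount.cifs, shifted left 8 bits); in
        # that case the permission checks below are skipped.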
if status!=8192 :
try:
self.cifsCreateFile(fileName, fileContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "Collaborator can create a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSCollabReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
allowed = True
if status!=8192 :
try:
self.cifsReadFile(fileName)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
allowed = False
assert allowed, "Collaborator is not able to read a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSCollabUpdateCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
status = self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
if status!=8192 :
disallowed = False
try:
self.cifsUpdateFile(fileName,fileUpdateContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "Collaborator can update a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSCollabDeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
if status!=8192 :
deleteMessage = self.cifsDeleteFile(fileName)
self.assertEquals(deleteMessage[0], 13,
"Expected (13, Permission denied) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
self.cifsUnmount()
return
def testCollabCreateHTTPInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName, fileContent)
self.assertEqual(createMessage[0], 401, "User B is able to create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateHTTPCollabReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
allowed = True
try:
self.httpReadFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName)
except urllib2.HTTPError as e:
self.assertEqual(e.code, 401, "Operation should be 401 (auth failed), was: "+str(e))
allowed = False
assert allowed, "Collaborator is not able to read a file in User A's filespace by HTTP!"
return
def testUserACreateHTTPCollabUpdateHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName,fileContent)
self.assertEquals(updateMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(updateMessage))
return
def testUserACreateHTTPCollabDeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName)
self.assertEquals(deleteMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
return
# Test RG Leader's access permissions on files in User A's Collab area
def testRGLeaderCreateCIFSInUserA(self):
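        # NOTE: despite the "CIFS" in the name, this test exercises the HTTP
        # interface via httpCreateFileAs.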
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName, fileContent)
self.assertNotEqual(createMessage[0], 401, "RG Leader can create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateCIFSRGLeaderReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
allowed = True
try:
self.cifsReadFile(fileName)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
allowed = False
assert allowed, "Research Group Leader is not able to read a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSRGLeaderUpdateCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
allowed = True
try:
self.cifsUpdateFile(fileName,fileUpdateContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
            allowed = False
        assert allowed, "Research Group Leader is not able to update a file in User A's filespace by CIFS!"
self.cifsUnmount()
return
def testUserACreateCIFSRGLeaderDeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
deleteMessage = self.cifsDeleteFile(fileName)
self.assertNotEquals(deleteMessage[0], 13,
"Not Expected (13, Permission denied) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
self.cifsUnmount()
return
def testRGLeaderCreateHTTPInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName, fileContent)
self.assertNotEqual(createMessage[0], 401, "Research Group Leader is not able to create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateHTTPRGLeaderReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
readFileContent = self.httpReadFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName)
self.assertEqual(fileContent,readFileContent,"Research Group Leader is not able to read the file in User A's filespace by HTTP")
return
def testUserACreateHTTPRGLeaderUpdateHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName,fileContent)
self.assertNotEquals(updateMessage[0], 401,
"Not Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(updateMessage))
return
def testUserACreateHTTPRGLeaderDeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName)
self.assertNotEquals(deleteMessage[0], 401,
"Not Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
return
    # Sentinel/placeholder tests
    def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
# Test User A's access permissions in his own Collab area
, "testUserACreateCIFSUserAReadCIFS"
, "testUserACreateCIFSUserAReadHTTP"
, "testUserAUpdateCIFSUserAReadCIFS"
, "testUserAUpdateHTTPUserAReadCIFS"
, "testUserACreateCIFSUserADeleteCIFS"
, "testUserACreateCIFSUserADeleteHTTP"
, "testUserACreateHTTPUserAReadHTTP"
, "testUserACreateHTTPUserAReadCIFS"
, "testUserAUpdateHTTPUserAReadHTTP"
, "testUserAUpdateCIFSUserAReadHTTP"
, "testUserACreateHTTPUserADeleteHTTP"
, "testUserACreateHTTPUserADeleteCIFS"
# Test User B's access permissions on files in User A's Collab area
, "testUserBCreateCIFSInUserA"
, "testUserACreateCIFSUserBReadCIFS"
, "testUserACreateCIFSUserBUpdateCIFS"
, "testUserACreateCIFSUserBDeleteCIFS"
, "testUserBCreateHTTPInUserA"
, "testUserACreateHTTPUserBReadHTTP"
, "testUserACreateHTTPUserBUpdateHTTP"
, "testUserACreateHTTPUserBDeleteHTTP"
# Test Collaborator's access permissions on files in User A's Collab area
, "testCollabCreateCIFSInUserA"
, "testUserACreateCIFSCollabReadCIFS"
, "testUserACreateCIFSCollabUpdateCIFS"
, "testUserACreateCIFSCollabDeleteCIFS"
, "testCollabCreateHTTPInUserA"
, "testUserACreateHTTPCollabReadHTTP"
, "testUserACreateHTTPCollabUpdateHTTP"
, "testUserACreateHTTPCollabDeleteHTTP"
# Test RG Leader's access permissions on files in User A's Collab area
, "testRGLeaderCreateCIFSInUserA"
, "testUserACreateCIFSRGLeaderReadCIFS"
, "testUserACreateCIFSRGLeaderUpdateCIFS"
, "testUserACreateCIFSRGLeaderDeleteCIFS"
, "testRGLeaderCreateHTTPInUserA"
, "testUserACreateHTTPRGLeaderReadHTTP"
, "testUserACreateHTTPRGLeaderUpdateHTTP"
, "testUserACreateHTTPRGLeaderDeleteHTTP"
],
"component":
[ "testComponents"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
]
}
return TestUtils.getTestSuite(TestFileCollabArea, testdict, select=select)
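# A minimal usage sketch (assuming MiscLib.TestUtils follows the conventions
# suggested by the runTests call below; the exact CLI is not shown here):
# passing a single test name on the command line selects just that test, e.g.
#
#   python TestFileCollabArea.py testUserACreateCIFSUserAReadCIFS
#
# while no argument runs the "unit" suite returned by getTestSuite().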
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileCollabArea.log", getTestSuite, sys.argv)
# End.
| {
"repo_name": "bhavanaananda/DataStage",
"path": "test/FileShare/tests/TestFileCollabArea.py",
"copies": "1",
"size": "32069",
"license": "mit",
"hash": -4896925657843513000,
"line_mean": 47.7386018237,
"line_max": 160,
"alpha_frac": 0.6756369079,
"autogenerated": false,
"ratio": 3.9689356435643566,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9998137619949103,
"avg_score": 0.029286986303050547,
"num_lines": 658
} |
# $Id: TestFileDefaultArea.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for TestFileDefaultArea module
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
class TestFileDefaultArea(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testUnauthUserHTTP(self):
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
# passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.userAname, TestConfig.userApass)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
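        # No credentials were added to the password manager above, so the
        # request below is sent unauthenticated; a 401/403 response is the
        # expected outcome.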
thepage=None
try:
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/ADMIRAL.README')
thepage = pagehandle.read()
        except urllib2.HTTPError as e:
if e.code!=403 and e.code!=401:
raise e
pass
assert (thepage==None), "Unauthenticated user can access filesystem!"
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testUnauthUserHTTP"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testReadMeSSH"
, "testReadMeDAVfs"
, "testCreateFileDAVfs"
, "testUpdateFileDAVfs"
, "testDeleteFileDAVfs"
, "testDeleteFileCIFS"
, "testDeleteFileHTTP"
]
}
return TestUtils.getTestSuite(TestFileDefaultArea, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileDefaultArea", getTestSuite, sys.argv)
# End.
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/FileShare/tests/TestFileDefaultArea.py",
"copies": "2",
"size": "2735",
"license": "mit",
"hash": 1829731080190174500,
"line_mean": 24.3240740741,
"line_max": 105,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 4.106606606606607,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5706606606606608,
"avg_score": null,
"num_lines": null
} |
# $Id: TestFilePrivateArea.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for access to private file area
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
import TestCifsUtils
import TestHttpUtils
class TestFilePrivateArea(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
#self.cifsUnmount()
return
def HTTP_redirect(self, opener, method, uri, data, content_type):
TestHttpUtils.do_HTTP_redirect(opener, method, uri, data, content_type)
return
def cifsMountAs(self, userArea, userName, userPass):
status= TestCifsUtils.do_cifsMount(''+userArea, userName, userPass)
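        # A status of 8192 (exit code 32 << 8 from mount.cifs) is treated as an
        # expected "mount refused" outcome rather than a test failure; this
        # interpretation is an assumption based on how callers use the value.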
if status != 8192:
self.assertEqual(status, 0, 'CIFS Mount failure')
return status
def cifsMount(self, userName, userPass):
        return self.cifsMountAs(userName, userName, userPass)
def cifsUnmount(self):
TestCifsUtils.do_cifsUnmount()
return
def cifsCreateFile(self, fileName, createFileContent):
TestCifsUtils.do_cifsCreateFile(fileName, createFileContent)
return createFileContent
def cifsReadFile(self, fileName ):
readFileContent = TestCifsUtils.do_cifsReadFile(fileName)
return readFileContent
def cifsUpdateFile(self,fileName, updateFileContent):
TestCifsUtils.do_cifsUpdateFile(fileName, updateFileContent)
return updateFileContent
def cifsDeleteFile(self,fileName):
deleteMessage = TestCifsUtils.do_cifsDeleteFile(fileName)
return deleteMessage
def httpAuthenticationHandler(self,userName, userPass):
authhandler = TestHttpUtils.do_httpAuthenticationHandler(userName, userPass)
return authhandler
def httpCreateFileAs(self, areaName, userName, userPass, fileName, fileContent):
createMessage = TestHttpUtils.do_httpCreateFile(''+areaName, userName, userPass, fileName, fileContent)
return createMessage
def httpCreateFile(self, userName, userPass, fileName, fileContent):
createMessage = self.httpCreateFileAs(userName, userName, userPass, fileName, fileContent)
return createMessage
def httpReadFileAs(self, areaName, userName, userPass,fileName):
readFileContent = TestHttpUtils.do_httpReadFile( ''+areaName, userName, userPass,fileName)
return readFileContent
def httpReadFile(self, userName, userPass,fileName):
readFileContent = self.httpReadFileAs(userName, userName, userPass,fileName)
return readFileContent
def httpUpdateFileAs(self, areaName, userName, userPass,fileName, updateFileContent):
updateMessage = TestHttpUtils.do_httpUpdateFile(''+areaName, userName, userPass,fileName, updateFileContent)
return updateMessage
def httpUpdateFile(self, userName, userPass,fileName, updateFileContent):
updateMessage = self.httpUpdateFileAs(userName, userName, userPass,fileName, updateFileContent)
return updateMessage
def httpDeleteFileAs(self, areaName, userName, userPass,fileName):
deleteMessage = TestHttpUtils.do_httpDeleteFile( ''+areaName, userName, userPass,fileName)
return deleteMessage
def httpDeleteFile(self, userName, userPass,fileName):
deleteMessage = self.httpDeleteFileAs(userName, userName, userPass,fileName)
return deleteMessage
def testNull(self):
assert (True), "True expected"
return
# Test User A's access permissions in his own Private area
def testUserACreateCIFSUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
createdFileContent = self.cifsCreateFile(fileName, fileContent)
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(createdFileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateCIFSUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
createdFileContent = self.cifsCreateFile(fileName, fileContent)
readFileContent = self.httpReadFile( TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(createdFileContent,readFileContent)
self.cifsUnmount()
return
def testUserAUpdateCIFSUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUpdateFile(fileName, fileUpdateContent)
updatedFileContent = fileContent + fileUpdateContent
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(updatedFileContent,readFileContent)
self.cifsUnmount()
return
def testUserAUpdateHTTPUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
updatedFileContent= fileContent + fileUpdateContent
        # An HTTP update overwrites the file rather than appending to it, so the
        # full updated content is expected when the file is read back.
updateMessage = self.httpUpdateFile(TestConfig.userAname, TestConfig.userApass,fileName,updatedFileContent)
self.assertEqual(updateMessage[0],0,"Update file failed: "+str(updateMessage))
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(updatedFileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateCIFSUserADeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsDeleteFile(fileName)
self.cifsUnmount()
return
def testUserACreateCIFSUserADeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
deleteMessage = self.httpDeleteFile(TestConfig.userAname, TestConfig.userApass, fileName)
self.assertEqual(deleteMessage[0],0,"Delete file failed: "+str(deleteMessage))
self.cifsUnmount()
return
def testUserACreateHTTPUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
readFileContent = self.httpReadFile(TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(fileContent,readFileContent)
return
def testUserACreateHTTPUserAReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
readFileContent = self.cifsReadFile(fileName)
self.assertEqual(fileContent,readFileContent)
self.cifsUnmount()
return
def testUserAUpdateHTTPUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFile(TestConfig.userAname, TestConfig.userApass,fileName,fileUpdateContent)
self.assertEqual(updateMessage[0],0,"Update file failed: "+str(updateMessage))
readFileContent = self.httpReadFile(TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(fileUpdateContent,readFileContent)
return
def testUserAUpdateCIFSUserAReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsUpdateFile(fileName, fileUpdateContent)
updatedFileContent= fileContent + fileUpdateContent
readFileContent = self.httpReadFile(TestConfig.userAname, TestConfig.userApass,fileName)
self.assertEqual(updatedFileContent,readFileContent)
self.cifsUnmount()
return
def testUserACreateHTTPUserADeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFile(TestConfig.userAname, TestConfig.userApass, fileName)
self.assertEqual(deleteMessage[0],0,"Delete file failed: "+str(deleteMessage))
return
def testUserACreateHTTPUserADeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsDeleteFile(fileName)
self.cifsUnmount()
return
# Test User B's access permissions on files in User A's Private area
def testUserBCreateCIFSInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
disallowed = False
try:
self.cifsCreateFile(fileName, fileContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "User B can create a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSUserBReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
disallowed = False
try:
self.cifsReadFile(fileName)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "User B can read a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSUserBUpdateCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
disallowed = False
try:
self.cifsUpdateFile(fileName,fileUpdateContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "User B can update a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSUserBDeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass)
deleteMessage = self.cifsDeleteFile(fileName)
self.assertEquals(deleteMessage[0], 13,
"Expected (13, Permission denied) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
self.cifsUnmount()
return
def testUserBCreateHTTPInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName, fileContent)
self.assertEqual(createMessage[0], 401, "User B can create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateHTTPUserBReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
disallowed = False
try:
self.httpReadFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName)
except urllib2.HTTPError as e:
self.assertEqual(e.code, 401, "Operation should be 401 (auth failed), was: "+str(e))
disallowed = True
assert disallowed, "User B can read a file in User A's filespace by HTTP!"
return
def testUserACreateHTTPUserBUpdateHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName,fileContent)
self.assertEquals(updateMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(updateMessage))
return
def testUserACreateHTTPUserBDeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFileAs(TestConfig.userAname, TestConfig.userBname, TestConfig.userBpass, fileName)
#print repr(deleteMessage)
self.assertEquals(deleteMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
return
# Test Collaborator's access permissions on files in User A's Private area
def testCollabCreateCIFSInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
disallowed = False
if status!=8192 :
try:
self.cifsCreateFile(fileName, fileContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "Collaborator can create a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSCollabReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
disallowed = False
if status!=8192 :
try:
self.cifsReadFile(fileName)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "Collaborator can read a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSCollabUpdateCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
status = self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
if status!=8192 :
disallowed = False
try:
self.cifsUpdateFile(fileName,fileUpdateContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "Collaborator can update a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSCollabDeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
status = self.cifsMountAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass)
if status!=8192 :
deleteMessage = self.cifsDeleteFile(fileName)
self.assertEquals(deleteMessage[0], 13,
"Expected (13, Permission denied) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
self.cifsUnmount()
return
def testCollabCreateHTTPInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName, fileContent)
self.assertEqual(createMessage[0], 401, "User B can create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateHTTPCollabReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
disallowed = False
try:
self.httpReadFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName)
except urllib2.HTTPError as e:
self.assertEqual(e.code, 401, "Operation should be 401 (auth failed), was: "+str(e))
disallowed = True
assert disallowed, "Collaborator can read a file in User A's filespace by HTTP!"
return
def testUserACreateHTTPCollabUpdateHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName,fileContent)
self.assertEquals(updateMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(updateMessage))
return
def testUserACreateHTTPCollabDeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFileAs(TestConfig.userAname, TestConfig.collabname, TestConfig.collabpass, fileName)
self.assertEquals(deleteMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
return
# Test RG Leader's access permissions on files in User A's Private area
def testRGLeaderCreateCIFSInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName, fileContent)
self.assertEqual(createMessage[0], 401, "RG Leader can create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateCIFSRGLeaderReadCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
        allowed = True
        try:
            self.cifsReadFile(fileName)
        except IOError as e:
            self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
            self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
            allowed = False
        assert allowed, "Research Group Leader is not able to read a file in User A's filespace by CIFS!"
self.cifsUnmount()
return
def testUserACreateCIFSRGLeaderUpdateCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
disallowed = False
try:
self.cifsUpdateFile(fileName,fileUpdateContent)
except IOError as e:
self.assertEqual(e.errno, 13, "Operation should fail with error 13, was: "+str(e))
self.assertEqual(e.strerror, "Permission denied", "Operation should fail with 'Permission denied', was: "+str(e))
disallowed = True
assert disallowed, "Research Group Leader can update a file in User A's filespace by WebDAV!"
self.cifsUnmount()
return
def testUserACreateCIFSRGLeaderDeleteCIFS(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
fileUpdateContent= 'Test update of file\n'
self.cifsMount(TestConfig.userAname, TestConfig.userApass)
self.cifsCreateFile(fileName, fileContent)
self.cifsUnmount()
self.cifsMountAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
deleteMessage = self.cifsDeleteFile(fileName)
self.assertEquals(deleteMessage[0], 13,
"Expected (13, Permission denied) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
self.cifsUnmount()
return
def testRGLeaderCreateHTTPInUserA(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName, fileContent)
self.assertEqual(createMessage[0], 401, "Research Group Leader can create a file in User A's filespace by HTTP, got: "+str(createMessage))
return
def testUserACreateHTTPRGLeaderReadHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
        allowed = True
        try:
            self.httpReadFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName)
        except urllib2.HTTPError as e:
            self.assertEqual(e.code, 401, "Operation should be 401 (auth failed), was: "+str(e))
            allowed = False
        assert allowed, "Research Group Leader is not able to read a file in User A's filespace by HTTP!"
return
def testUserACreateHTTPRGLeaderUpdateHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
updateMessage = self.httpUpdateFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName,fileContent)
self.assertEquals(updateMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(updateMessage))
return
def testUserACreateHTTPRGLeaderDeleteHTTP(self):
fileName = 'testCreateFileCIFS.tmp'
fileContent= 'Test creation of file\n'
createMessage = self.httpCreateFile(TestConfig.userAname, TestConfig.userApass, fileName, fileContent)
self.assertEqual(createMessage[0],0,"Create file failed: "+str(createMessage))
deleteMessage = self.httpDeleteFileAs(TestConfig.userAname, TestConfig.userRGleadername, TestConfig.userRGleaderpass, fileName)
self.assertEquals(deleteMessage[0], 401,
"Expected (401, basic authentication failed) for "+TestConfig.cifsmountpoint + '/'+ fileName+"'"+
", got: "+str(deleteMessage))
return
    # Sentinel/placeholder tests
    def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
# Test User A's access permissions in his own Private area
, "testUserACreateCIFSUserAReadCIFS"
, "testUserACreateCIFSUserAReadHTTP"
, "testUserAUpdateCIFSUserAReadCIFS"
, "testUserAUpdateHTTPUserAReadCIFS"
, "testUserACreateCIFSUserADeleteCIFS"
, "testUserACreateCIFSUserADeleteHTTP"
, "testUserACreateHTTPUserAReadHTTP"
, "testUserACreateHTTPUserAReadCIFS"
, "testUserAUpdateHTTPUserAReadHTTP"
, "testUserAUpdateCIFSUserAReadHTTP"
, "testUserACreateHTTPUserADeleteHTTP"
, "testUserACreateHTTPUserADeleteCIFS"
# Test User B's access permissions on files in User A's Private area
, "testUserBCreateCIFSInUserA"
, "testUserACreateCIFSUserBReadCIFS"
, "testUserACreateCIFSUserBUpdateCIFS"
, "testUserACreateCIFSUserBDeleteCIFS"
, "testUserBCreateHTTPInUserA"
, "testUserACreateHTTPUserBReadHTTP"
, "testUserACreateHTTPUserBUpdateHTTP"
, "testUserACreateHTTPUserBDeleteHTTP"
# Test Collaborator's access permissions on files in User A's Private area
, "testCollabCreateCIFSInUserA"
, "testUserACreateCIFSCollabReadCIFS"
, "testUserACreateCIFSCollabUpdateCIFS"
, "testUserACreateCIFSCollabDeleteCIFS"
, "testCollabCreateHTTPInUserA"
, "testUserACreateHTTPCollabReadHTTP"
, "testUserACreateHTTPCollabUpdateHTTP"
, "testUserACreateHTTPCollabDeleteHTTP"
# Test RG Leader's access permissions on files in User A's Private area
, "testRGLeaderCreateCIFSInUserA"
, "testUserACreateCIFSRGLeaderReadCIFS"
, "testUserACreateCIFSRGLeaderUpdateCIFS"
, "testUserACreateCIFSRGLeaderDeleteCIFS"
, "testRGLeaderCreateHTTPInUserA"
, "testUserACreateHTTPRGLeaderReadHTTP"
, "testUserACreateHTTPRGLeaderUpdateHTTP"
, "testUserACreateHTTPRGLeaderDeleteHTTP"
],
"component":
[ "testComponents"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
]
}
return TestUtils.getTestSuite(TestFilePrivateArea, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFilePrivateArea.log", getTestSuite, sys.argv)
# End.
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/FileShare/tests/TestFileSilkGroup.py",
"copies": "2",
"size": "32472",
"license": "mit",
"hash": -7191330283020728000,
"line_mean": 47.4656716418,
"line_max": 148,
"alpha_frac": 0.6728566149,
"autogenerated": false,
"ratio": 3.9896793217840028,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5662535936684003,
"avg_score": null,
"num_lines": null
} |
# $Id: TestFileUserAPublic.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for FileUserAPublic module
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
# Initialize authenticated HTTP connection opener
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.collabname, TestConfig.collabpass)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
class TestFileUserAPublic(unittest.TestCase):
def setUp(self):
mountcommand = ( '/sbin/mount.cifs //%(host)s/files/%(userA)s %(mountpt)s -o rw,user=%(user)s,password=%(pass)s,nounix,forcedirectio' %
{ 'host': TestConfig.hostname
, 'userA': TestConfig.userAname
, 'user': TestConfig.collabname
, 'mountpt': TestConfig.cifsmountpoint
, 'pass': TestConfig.collabpass
} )
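        # mount.cifs options: "nounix" disables the CIFS Unix extensions and
        # "forcedirectio" bypasses client-side caching, so permission changes
        # on the server are seen immediately (the rationale is an assumption).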
status=os.system(mountcommand)
self.assertEqual(status, 0, 'CIFS Mount failure')
return
def tearDown(self):
os.system('/sbin/umount.cifs '+TestConfig.cifsmountpoint)
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testReadMeSSH(self):
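        # ssh_string is not defined anywhere in this module, so this test can
        # only run once it is supplied; it is accordingly listed under "pending".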
f=os.system(ssh_string)
print f
return
def testReadMeCIFS(self):
# Test assumes ADMIRAL shared file system is mounted at mountpoint
# Open README file
f=None
try:
f = open(TestConfig.cifsmountpoint+'/'+TestConfig.readmefile)
except:
pass
assert (f==None), "Public user can read User A's file!"
return
def testCreateFileCIFS(self):
f=None
try:
f = open(TestConfig.cifsmountpoint+'/testCreateFile.tmp','w+')
except:
pass
assert (f==None), "Public user can create files in User A's filespace!"
return
def testUpdateFileCIFS(self):
f=None
try:
            f = open(TestConfig.cifsmountpoint+'/'+TestConfig.readmefile,'w+')
except:
pass
assert (f==None), "Public user can open User A's files for writing!"
return
def testDeleteFileCIFS(self):
filename1 = TestConfig.cifsmountpoint+'/testCreateFile.tmp'
filename2 = TestConfig.cifsmountpoint+'/testUpdateFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
return
def testReadMeDAVfs(self):
# Test assumes ADMIRAL shared file system is mounted at mountpoint
# Open README file
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
f = open(TestConfig.webdavmountpoint+'/'+TestConfig.readmefile)
assert (f), "README file open failed (DAVfs)"
# Read first line
l = f.readline()
# Close file
f.close()
# Check first line
self.assertEqual(l, TestConfig.readmetext, 'Unexpected README content')
os.system('umount '+TestConfig.webdavmountpoint)
return
def testCreateFileDAVfs(self):
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
f = open(TestConfig.webdavmountpoint+'/testCreateWebDAVFile.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(TestConfig.webdavmountpoint+'/testCreateWebDAVFile.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content')
os.system('umount '+TestConfig.webdavmountpoint)
return
def testUpdateFileDAVfs(self):
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
filename = TestConfig.webdavmountpoint+'/testUpdateWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'a+')
f.write('Test update of file\n')
f.close()
f = open(filename,'r')
l1 = f.readline()
l2 = f.readline()
f.close()
self.assertEqual(l1, 'Test creation of file\n', 'Unexpected file content: l1')
self.assertEqual(l2, 'Test update of file\n', 'Unexpected file content: l2')
os.system('umount '+TestConfig.webdavmountpoint)
return
def testDeleteFileDAVfs(self):
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
filename1 = TestConfig.webdavmountpoint+'/testCreateWebDAVFile.tmp'
filename2 = TestConfig.webdavmountpoint+'/testUpdateWebDAVFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
os.system('umount '+TestConfig.webdavmountpoint)
return
def testReadMeHTTP(self):
thepage=None
try:
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/'+TestConfig.readmefile)
thepage = pagehandle.read()
except:
pass
assert (thepage==None), "Public user can read User A's file by HTTP!"
return
def testCreateFileHTTP(self):
thepage=None
createstring="Testing file creation with WebDAV"
try:
req=urllib2.Request(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp', data=createstring)
req.add_header('Content-Type', 'text/plain')
req.get_method = lambda: 'PUT'
url=opener.open(req)
phan=urllib2.urlopen(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp')
thepage=phan.read()
except:
pass
assert (thepage==None), "Public user can create a file in User A's filespace by HTTP!"
return
def testUpdateFileHTTP(self):
thepage=None
updatestring="Testing file modification with WebDAV"
try:
req=urllib2.Request(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp', data=updatestring)
req.get_method = lambda: 'PUT'
url=opener.open(req)
phan=urllib2.urlopen(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp')
thepage=phan.read()
except:
pass
assert (thepage!=updatestring), "Public user can update User A's file by HTTP!"
return
def testDeleteFileHTTP(self):
req=urllib2.Request(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp')
req.get_method = lambda: 'DELETE'
url=opener.open(req)
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMeCIFS"
, "testReadMeHTTP"
, "testCreateFileCIFS"
, "testCreateFileHTTP"
, "testUpdateFileCIFS"
, "testUpdateFileHTTP"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testReadMeSSH"
, "testReadMeDAVfs"
, "testCreateFileDAVfs"
, "testUpdateFileDAVfs"
, "testDeleteFileDAVfs"
, "testDeleteFileCIFS"
, "testDeleteFileHTTP"
]
}
return TestUtils.getTestSuite(TestFileUserAPublic, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileUserAPublic", getTestSuite, sys.argv)
# End.
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/FileShare/tests/TestFileUserAPublic.py",
"copies": "2",
"size": "10033",
"license": "mit",
"hash": -5924021365602610000,
"line_mean": 32.4433333333,
"line_max": 143,
"alpha_frac": 0.5947373667,
"autogenerated": false,
"ratio": 3.9956192751891675,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017141002991059265,
"num_lines": 300
} |
# $Id: TestHttpSession.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys, unittest, logging, zipfile, os, re
from os.path import normpath
#Add main library directory to python path
###sys.path.append("../../../test")
sys.path.append("..")
from MiscLib import TestUtils
import HttpSession, TestConfig
logger = logging.getLogger("TestHttpSession")
class TestHttpSession(unittest.TestCase):
def setUp(self):
# Tests run against Databank via local proxy
self.endpointhost = TestConfig.HostName
self.basepath = "/"+TestConfig.SiloName+"/"
self.username = TestConfig.Username
self.password = TestConfig.Password
return
def tearDown(self):
return
def testSimpleHttpGet(self):
session = HttpSession.makeHttpSession(self.endpointhost, self.basepath, self.username, self.password)
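        # doHTTP_GET (as used here) checks the response against expect_status /
        # expect_reason and returns a (content-type, body) pair.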
(responsetype, responsedata) = session.doHTTP_GET(endpointpath=self.basepath, resource="datasets", expect_status=200, expect_reason="OK", accept_type="*/*")
self.assertEquals(responsetype, "text/html", "List datasets response type")
self.assertTrue(re.search("<title>.*List of Datasets.*</title>", responsedata) != None, "List datasets response data")
return
def testSimpleHttpPost(self):
session = HttpSession.makeHttpSession(self.endpointhost, self.basepath, self.username, self.password)
fields = \
[ ("id", "test-dataset")
]
files =[]
(reqtype, reqdata) = session.encode_multipart_formdata(fields, files)
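        # encode_multipart_formdata returns the multipart Content-Type header
        # value and the encoded request body, which are passed on to doHTTP_POST.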
(responsetype, responsedata) = session.doHTTP_POST(endpointpath=self.basepath, resource="datasets", data=reqdata, data_type=reqtype, expect_status=201, expect_reason="Created", accept_type="*/*")
self.assertEquals(responsetype, "text/plain", "Create dataset response type: "+responsetype)
self.assertEquals(responsedata, "201 Created", "Create dataset response data: "+responsedata)
#self.assertTrue(re.search("<title>.*List of Datasets.*</title>", responsedata) != None, "Create dataset response data")
# Do GET to datasets/test-dataset
(responsetype, responsedata) = session.doHTTP_GET(endpointpath=self.basepath, resource="datasets/test-dataset", expect_status=200, expect_reason="OK", accept_type="*/*")
self.assertEquals(responsetype, "text/html", "Get dataset response type")
        logger.debug("responsetype: "+responsetype)
self.assertTrue(re.search(".*Root directory of.*", responsedata) != None, "Get dataset response data")
return
def testSimpleHttpDelete(self):
session = HttpSession.makeHttpSession(self.endpointhost, self.basepath, self.username, self.password)
(responsetype, responsedata) = session.doHTTP_DELETE(endpointpath=self.basepath, resource="datasets/test-dataset", expect_status=200, expect_reason="OK")
#self.assertEquals(responsetype, "text/html", "Delete dataset response type")
#self.assertTrue(re.search("<title>.*Del Dataset.*</title>", responsedata) != None, "Delete dataset response data")
# Do GET to datasets/test-dataset
(responsetype, responsedata) = session.doHTTP_GET(endpointpath=self.basepath, resource="datasets/test-dataset", expect_status=404, expect_reason="Not Found", accept_type="*/*")
#self.assertEquals(responsetype, "text/html", "Get dataset response type")
#self.assertTrue(re.search(".*Root directory of.*", responsedata) != None, "Get dataset response data")
return
# Test the Dataset Creation: <TestSubmission>
def testZZZZZ(self):
return
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
        name          a single named test to be run
"""
testdict = {
"unit":
[ #"testUnits"
#"testSimpleHttpGet_orig",
"testSimpleHttpGet",
"testSimpleHttpPost",
"testSimpleHttpDelete"
],
"component":
[ #"testComponents"
],
"integration":
[ #"testIntegration"
],
"pending":
[ #"testPending"
]
}
return TestUtils.getTestSuite(TestHttpSession, testdict, select=select)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
TestConfig.setDatasetsBaseDir(".")
TestUtils.runTests("TestHttpSession.log", getTestSuite, sys.argv)
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "src/SubmitDatasetHandler/tests/TestHttpSession.py",
"copies": "2",
"size": "4942",
"license": "mit",
"hash": 5834073734519868000,
"line_mean": 43.9272727273,
"line_max": 203,
"alpha_frac": 0.6513557264,
"autogenerated": false,
"ratio": 4.064144736842105,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018391903695943023,
"num_lines": 110
} |
from unittest import TestCase
from peloton.utils.structs import ReadOnlyDict
from peloton.utils.structs import FilteredOptionParser
from peloton.utils.structs import RoundRobinList
from types import ListType
class Test_ReadOnlyDict(TestCase):
def test_readOnlyDict(self):
d = ReadOnlyDict()
d['x'] = 10
self.assertEquals(d['x'], 10)
self.assertRaises(Exception, d.__setitem__, 'x', 11)
d.setRewriteable(['x'])
d['x'] = 20
self.assertEquals(d['x'], 20)
d.setRewriteable('t')
d['t'] = 10
self.assertEquals(d['t'], 10)
d['t'] = 11
self.assertEquals(d['t'], 11)
class Test_FilteredOptionParser(TestCase):
def setUp(self):
self.fo = FilteredOptionParser()
self.fo.add_option('--prefix', default='/etc/')
self.fo.add_option('-c', dest='configdir', default='$PREFIX')
self.fo.add_option('-o', dest='outputdir', default='/')
def tearDown(self):
pass
def test_noOpts(self):
o,a = self.fo.parse_args([])
self.assertEquals(o.prefix, '/etc/')
self.assertEquals(o.configdir, '/etc/')
self.assertEquals(o.outputdir, '/')
def test_overidePrefix(self):
o,a = self.fo.parse_args(['--prefix=/tmp'])
self.assertEquals(o.prefix, '/tmp')
self.assertEquals(o.configdir, '/tmp')
self.assertEquals(o.outputdir, '/')
def test_manualUse(self):
o,a = self.fo.parse_args(['--prefix=/tmp', '-o', '$PREFIX/output'])
self.assertEquals(o.prefix, '/tmp')
self.assertEquals(o.configdir, '/tmp')
self.assertEquals(o.outputdir, '/tmp/output')
def test_filterArgs(self):
o, a = self.fo.parse_args(['--prefix=/tmp', '$PREFIX/test.xml'])
self.assertEquals(a[0], '/tmp/test.xml')
def test_argsOnly(self):
o, a = self.fo.parse_args(['$PREFIX/test.xml', '$PREFIX'])
self.assertEquals(a[0], '/etc//test.xml')
self.assertEquals(a[1], '/etc/')
def test_overideSubstitutions(self):
self.fo.setSubstitutions(prefix='/var')
o,a = self.fo.parse_args(['-o', '$PREFIX/output'])
self.assertEquals(o.prefix, '/var')
self.assertEquals(o.configdir, '/var')
self.assertEquals(o.outputdir, '/var/output')
def test_complexOverideSubstitutions(self):
self.fo.setSubstitutions(prefix='/var/$OUTPUTDIR/test')
o,a = self.fo.parse_args(['-o','/usr/local'])
self.assertEquals(o.prefix, '/var//usr/local/test')
self.assertEquals(o.configdir, '/var//usr/local/test')
self.assertEquals(o.outputdir, '/usr/local')
class Test_RoundRobinList(TestCase):
def setUp(self):
self.thelist = RoundRobinList(['a','b','c','d'])
def tearDown(self):
pass
def test_iter(self):
self.assertEquals(self.thelist.rrnext(), 'a')
self.assertEquals(self.thelist.rrnext(), 'b')
self.assertEquals(self.thelist.rrnext(), 'c')
self.assertEquals(self.thelist.rrnext(), 'd')
self.assertEquals(self.thelist.rrnext(), 'a')
self.assertEquals(self.thelist.rrnext(), 'b')
self.assertEquals(self.thelist.rrnext(), 'c')
self.thelist.append('e')
self.assertEquals(self.thelist.rrnext(), 'd')
self.assertEquals(self.thelist.rrnext(), 'e')
self.assertEquals(self.thelist.rrnext(), 'a')
self.thelist.pop()
self.thelist.remove('d')
self.assertEquals(self.thelist.rrnext(), 'b')
self.assertEquals(self.thelist.rrnext(), 'c')
self.assertEquals(self.thelist.rrnext(), 'a')
def test_slicing(self):
newList = self.thelist[:3]
self.assertTrue(isinstance(newList, RoundRobinList))
self.assertEquals(newList.rrnext(), 'a')
self.assertEquals(newList.rrnext(), 'b')
self.assertEquals(newList.rrnext(), 'c')
self.assertEquals(newList.rrnext(), 'a')
newList = self.thelist[1:3]
self.assertTrue(isinstance(newList, RoundRobinList))
self.assertEquals(newList.rrnext(), 'b')
self.assertEquals(newList.rrnext(), 'c')
self.assertEquals(newList.rrnext(), 'b')
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/utils/tests/testStructs.py",
"copies": "1",
"size": "4475",
"license": "bsd-3-clause",
"hash": -3986547263422614500,
"line_mean": 36.6134453782,
"line_max": 75,
"alpha_frac": 0.6004469274,
"autogenerated": false,
"ratio": 3.5098039215686274,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4610250848968627,
"avg_score": null,
"num_lines": null
} |
# $Id: test_symrec.py 1527 2006-05-07 16:41:16Z mu $
from tests import TestCase, add
from yasm import SymbolTable, Expression, YasmError
class TSymbolTable(TestCase):
def setUp(self):
self.symtab = SymbolTable()
def test_keys(self):
self.assertEquals(len(self.symtab.keys()), 0)
self.symtab.declare("foo", None, 0)
keys = self.symtab.keys()
self.assertEquals(len(keys), 1)
self.assertEquals(keys[0], "foo")
def test_contains(self):
self.assert_("foo" not in self.symtab)
self.symtab.declare("foo", None, 0)
self.assert_("foo" in self.symtab)
def test_exception(self):
expr = Expression('+', 1, 2)
self.symtab.define_equ("foo", expr, 0)
self.assertRaises(YasmError, self.symtab.define_equ, "foo", expr, 0)
self.symtab.define_equ("bar", expr, 0) # cleared
self.assertRaises(YasmError, self.symtab.define_special, "bar",
'global')
def test_iters(self):
tab = self.symtab
tab.declare("foo", None, 0)
tab.declare("bar", None, 0)
tab.declare("baz", None, 0)
# while ordering is not known, it must be consistent
self.assertEquals(list(tab.keys()), list(tab.iterkeys()))
self.assertEquals(list(tab.values()), list(tab.itervalues()))
self.assertEquals(list(tab.items()), list(tab.iteritems()))
self.assertEquals(list(tab.iteritems()), zip(tab.keys(), tab.values()))
add(TSymbolTable)
class TSymbolAttr(TestCase):
def setUp(self):
self.symtab = SymbolTable()
self.declsym = self.symtab.declare("foo", None, 0)
def test_visibility(self):
sym = self.symtab.declare("local1", None, 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("local2", '', 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("local3", 'local', 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("global", 'global', 0)
self.assertEquals(sym.visibility, set(['global']))
sym = self.symtab.declare("common", 'common', 0)
self.assertEquals(sym.visibility, set(['common']))
sym = self.symtab.declare("extern", 'extern', 0)
self.assertEquals(sym.visibility, set(['extern']))
sym = self.symtab.declare("dlocal", 'dlocal', 0)
self.assertEquals(sym.visibility, set(['dlocal']))
self.assertRaises(ValueError,
lambda: self.symtab.declare("extern2", 'foo', 0))
def test_name(self):
self.assertEquals(self.declsym.name, "foo")
def test_equ(self):
self.assertRaises(AttributeError, lambda: self.declsym.equ)
def test_label(self):
self.assertRaises(AttributeError, lambda: self.declsym.label)
def test_is_special(self):
self.assertEquals(self.declsym.is_special, False)
def test_is_curpos(self):
self.assertEquals(self.declsym.is_curpos, False)
add(TSymbolAttr)
| {
"repo_name": "relokin/parsec",
"path": "pkgs/tools/yasm/src/tools/python-yasm/tests/test_symrec.py",
"copies": "5",
"size": "3029",
"license": "bsd-3-clause",
"hash": -5842638296220731000,
"line_mean": 36.3950617284,
"line_max": 79,
"alpha_frac": 0.6229778805,
"autogenerated": false,
"ratio": 3.4856156501726123,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6608593530672612,
"avg_score": null,
"num_lines": null
} |
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import logging
import unittest
sys.path.append("../..")
# Test class
class TestExample(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testCase(self):
assert "Some condition"
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscUtils import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testCase"
],
"component":
[ "testComponents"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
]
}
return TestUtils.getTestSuite(TestExample, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestExample", getTestSuite, sys.argv)
# End.
| {
"repo_name": "wf4ever/ro-manager",
"path": "src/MiscUtils/tests/TestTestUtils.py",
"copies": "1",
"size": "1801",
"license": "mit",
"hash": 1977335408395965200,
"line_mean": 21.5125,
"line_max": 77,
"alpha_frac": 0.5813436979,
"autogenerated": false,
"ratio": 4.002222222222223,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083565920122223,
"avg_score": null,
"num_lines": null
} |
# $Id: TestTestUtils.py 1047 2009-01-15 14:48:58Z graham $
#
# Test test runner utilities
#
import sys
import logging
import unittest
sys.path.append("../..")
# Test class
class TestExample(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testCase(self):
assert "Some condition"
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testCase"
],
"component":
[ "testComponents"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
]
}
return TestUtils.getTestSuite(TestExample, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestExample", getTestSuite, sys.argv)
# End.
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/MiscLib/tests/TestTestUtils.py",
"copies": "8",
"size": "1615",
"license": "mit",
"hash": 3488159745750955000,
"line_mean": 20.25,
"line_max": 77,
"alpha_frac": 0.573993808,
"autogenerated": false,
"ratio": 4.194805194805195,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009298948045713762,
"num_lines": 76
} |
# $Id: TestUtils.py 1047 2009-01-15 14:48:58Z graham $
#
# Support functions for running different test suites
#
# Test suites are selected using a command line argument:
#
# Test classes are:
# "unit" These are stand-alone tests that all complete within a few
# seceonds and do not depend on resources external to the
# package being tested, (other than other libraries used).
# "component" These are tests that take loonger to run, or depend on
# external resources, (files, etc.) but do not depend on
# external services.
# "integration" These are tests that exercise interactions with seperate
# services.
# "pending" These are tests that have been designed and created, but
# for which the corresponding implementation has not been
# completed.
# "all" return suite of unit, component and integration tests
# name a single named test to be run.
#
import logging
import unittest
def getTestSuite(testclass,testdict,select="unit"):
"""
Assemble test suite from supplied class, dictionary and selector
testclass is the test class whose methods are test cases
testdict is a dictionary of test cases in named test suite,
keyed by "unit", "component", etc., or by a named test.
select is the test suite selector:
"unit" return suite of unit tests only
"component" return suite of component tests
"integrate" return suite of integration tests
"pending" return suite of pending tests
"all" return suite of unit and component tests
name a single named test to be run
"""
suite = unittest.TestSuite()
# Named test only
if select[0:3] not in ["uni","com","all","int","pen"]:
if not hasattr(testclass, select):
print "%s: no test named '%s'"%(testclass.__name__, select)
return None
suite.addTest(testclass(select))
return suite
# Select test classes to include
if select[0:3] == "uni":
testclasses = ["unit"]
elif select[0:3] == "com":
testclasses = ["component"]
elif select[0:3] == "int":
testclasses = ["integration"]
elif select[0:3] == "pen":
testclasses = ["pending"]
elif select[0:3] == "all":
testclasses = ["unit", "component"]
else:
testclasses = ["unit"]
for c in testclasses:
for t in testdict.get(c,[]):
if not hasattr(testclass, t):
print "%s: in '%s' tests, no test named '%s'"%(testclass.__name__, c, t)
return None
suite.addTest(testclass(t))
return suite
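# Example (editor's illustrative sketch; the test class and dictionary below
# are assumptions, not part of this module):
#
#   suite = getTestSuite(MyTestClass, {"unit": ["testFoo", "testBar"]}, select="unit")
#   if suite: unittest.TextTestRunner(verbosity=1).run(suite)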
def runTests(logname, getSuite, args):
"""
Run unit tests based on supplied command line argument values
logname name for logging output file, if used
getSuite function to retrieve test suite, given selector value
args command line arguments (or equivalent values)
"""
sel = "unit"
vrb = 1
if len(args) > 1:
sel = args[1]
if sel[0:3] in ["uni","com","all","int","pen"]:
logging.basicConfig(level=logging.WARNING)
if sel[0:3] in ["com","all"]: vrb = 2
else:
# Run single test with elevated logging to file via new handler
logging.basicConfig(level=logging.DEBUG)
# Enable debug logging to a file
fileloghandler = logging.FileHandler(logname,"w")
fileloghandler.setLevel(logging.DEBUG)
# Use this formatter for shorter log records
###filelogformatter = logging.Formatter('%(levelname)s %(message)s', "%H:%M:%S")
        # Use this formatter to display timing information:
filelogformatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s', "%H:%M:%S")
fileloghandler.setFormatter(filelogformatter)
logging.getLogger('').addHandler(fileloghandler)
vrb = 2
runner = unittest.TextTestRunner(verbosity=vrb)
tests = getSuite(select=sel)
if tests: runner.run(tests)
return
# End.
| {
"repo_name": "bhavanaananda/DataStage",
"path": "test/MiscLib/TestUtils.py",
"copies": "2",
"size": "4223",
"license": "mit",
"hash": -5496629479650041000,
"line_mean": 40,
"line_max": 109,
"alpha_frac": 0.6057305233,
"autogenerated": false,
"ratio": 4.168805528134254,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5774536051434255,
"avg_score": null,
"num_lines": null
} |
""" Test functions in peloton.utils """
from unittest import TestCase
from peloton.utils import chop
from peloton.utils import getClassFromString
from peloton.utils import deCompound
class Test_littleUtils(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_chop(self):
match = 'hello world'
s = 'hello world\n'
self.assertEquals(chop(s), match)
s = 'hello world\r'
self.assertEquals(chop(s), match)
s = 'hello world\r\n'
self.assertEquals(chop(s), match)
s = 'hello world\n\r'
self.assertEquals(chop(s), match)
s = 'hello world\r*'
self.assertEquals(chop(s,extras='*'), match)
def test_getClassFromString(self):
c = 'peloton.kernel.PelotonKernel'
cls = getClassFromString(c)
from peloton.kernel import PelotonKernel as MatchClass
self.assertEquals(cls, MatchClass)
self.assertRaises(Exception, getClassFromString, 'peloton.kernel.Bogus')
def test_deCompound(self):
master = ['a','b','c','d','e','f']
testa = ['a','b','c,d,e','f']
testb = ['a','b,c','d,e','f']
testc = ['a','b','c','d','e','f']
for i in [testa, testb, testc]:
self.assertEquals(deCompound(i), master)
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/utils/tests/testUtils.py",
"copies": "1",
"size": "1490",
"license": "bsd-3-clause",
"hash": 3504891701961555000,
"line_mean": 30.0625,
"line_max": 80,
"alpha_frac": 0.6060402685,
"autogenerated": false,
"ratio": 3.3940774487471526,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45001177172471524,
"avg_score": null,
"num_lines": null
} |
# $Id: TestWebDAVAccess.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for FileAccess module
#
import os
# Make sure python-kerberos package is installed
import kerberos
import sys
import httplib
import urllib2
import urllib2_kerberos
import re
import base64
import unittest
from urlparse import urlparse
sys.path.append("../..")
readmetext="This directory is the root of the ADMIRAL shared file system.\n"
mountpoint="mountadmiralwebdav"
readmefile="ADMIRAL.README"
theurl="http://zoo-admiral-silk.zoo.ox.ac.uk/webdav/ChrisHolland"
class TestWebDAVAccess(unittest.TestCase):
def setUp(self):
# mount WebDAV share here
status=os.system('mount '+mountpoint)
self.assertEqual(status, 0, 'Mount failure')
return
def tearDown(self):
os.system('umount '+mountpoint)
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testReadMe(self):
# Test assumes ADMIRAL shared file system is mounted at mountpoint
# Open README file
f = open(mountpoint+'/'+readmefile)
assert (f), "README file open failed"
# Read first line
l = f.readline()
# Close file
f.close()
# Check first line
self.assertEqual(l, readmetext, 'Unexpected README content')
return
def testCreateFile(self):
f = open(mountpoint+'/testCreateWebDAVFile.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(mountpoint+'/testCreateWebDAVFile.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content')
return
def testUpdateFile(self):
filename = mountpoint+'/testUpdateWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'a+')
f.write('Test update of file\n')
f.close()
f = open(filename,'r')
l1 = f.readline()
l2 = f.readline()
f.close()
self.assertEqual(l1, 'Test creation of file\n', 'Unexpected file content: l1')
self.assertEqual(l2, 'Test update of file\n', 'Unexpected file content: l2')
return
def testRewriteFile(self):
filename = mountpoint+'/testRewriteWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'w+')
f.write('Test rewrite of file\n')
f.close()
f = open(filename,'r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test rewrite of file\n', 'Unexpected file content')
return
def testDeleteFile(self):
filename1 = mountpoint+'/testCreateWebDAVFile.tmp'
filename2 = mountpoint+'/testRewriteWebDAVFile.tmp'
filename3 = mountpoint+'/testUpdateWebDAVFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
# Test and delete third file
try:
s = os.stat(filename3)
except:
assert (False), "File "+filename3+" not found or other stat error"
os.remove(filename3)
try:
s = os.stat(filename3)
assert (False), "File "+filename3+" not deleted"
except:
pass
return
def testWebDAVFile(self):
h1 = httplib.HTTPConnection('zakynthos.zoo.ox.ac.uk')
h1.request('GET','/webdav')
res=h1.getresponse()
authreq = str(res.status) + ' ' + res.reason
print authreq
self.assertEqual(authreq, '401 Authorization Required', 'Unexpected response')
return
def testWebDAVFileUrlLib(self):
#_ignore = kerberos.GSS_C_DELEG_FLAG
#from kerberos import GSS_C_DELEG_FLAG,GSS_C_MUTUAL_FLAG,GSS_C_SEQUENCE_FLAG
#_ignore, ctx = kerberos.authGSSClientInit('krbtgt/OX.AC.UK@OX.AC.UK', gssflags=GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG)
_ignore, ctx = kerberos.authGSSClientInit('HTTP@zakynthos.zoo.ox.ac.uk')
_ignore = kerberos.authGSSClientStep(ctx, '')
tgt = kerberos.authGSSClientResponse(ctx)
opener = urllib2.build_opener()
opener.add_handler(urllib2_kerberos.HTTPKerberosAuthHandler())
resp = opener.open(theurl)
print resp
        return
        # NOTE: everything below this return is unreachable; it is retained
        # as a sketch of manually extracting the WWW-Authenticate scheme and
        # realm from a 401 response.
        req = urllib2.Request(theurl)
try:
handle = urllib2.urlopen(req)
except IOError, e:
pass
else:
assert (False), theurl + " isn't protected by authentication."
if not hasattr(e, 'code') or e.code != 401:
# we got an error - but not a 401 error
assert (False), theurl + " Error: " + e
authline = e.headers['www-authenticate']
# this gets the www-authenticate line from the headers
# which has the authentication scheme and realm in it
authobj = re.compile(
r'''(?:\s*www-authenticate\s*:)?\s*(\w*)\s+realm=['"]([^'"]+)['"]''',
re.IGNORECASE)
# this regular expression is used to extract scheme and realm
matchobj = authobj.match(authline)
if not matchobj:
# if the authline isn't matched by the regular expression
# then something is wrong
assert (False), "Malformed authentication header: " + authline
scheme = matchobj.group(1)
realm = matchobj.group(2)
# here we've extracted the scheme
# and the realm from the header
print scheme
print realm
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMe"
, "testCreateFile"
, "testRewriteFile"
, "testUpdateFile"
, "testDeleteFile"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testWebDAVFile"
, "testWebDAVFileUrlLib"
]
}
return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileAccess", getTestSuite, sys.argv)
# End.
| {
"repo_name": "tectronics/admiral-jiscmrd",
"path": "test/FileShare/tests/TestWebDAVAccess.py",
"copies": "1",
"size": "7831",
"license": "mit",
"hash": -8734683012315999000,
"line_mean": 29.7098039216,
"line_max": 143,
"alpha_frac": 0.5844719704,
"autogenerated": false,
"ratio": 3.8979591836734695,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9912795209831609,
"avg_score": 0.013927188848372105,
"num_lines": 255
} |
"""$Id: text_html.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for HTML text output"""
from base import BaseFormatter
import feedvalidator
from xml.sax.saxutils import escape
from feedvalidator.logging import Message, Info, Warning, Error
from config import DOCSURL
def escapeAndMark(x):
html = escape(x)
# Double-escape, and highlight, illegal characters.
for i in range(len(html)-1,-1,-1):
c = ord(html[i])
if 0x80 <= c <= 0x9F or c == 0xfffd:
if c == 0xfffd:
e = '?'
else:
e = '\\x%02x' % (c)
html = '%s<span class="badOctet">%s</span>%s' % (html[:i], e, html[i+1:])
return html.replace(" "," ")
class Formatter(BaseFormatter):
FRAGMENTLEN = 80
def __init__(self, events, rawdata):
BaseFormatter.__init__(self, events)
self.rawdata = rawdata
def getRootClass(self, aClass):
base = aClass.__bases__[0]
if base == Message: return aClass
if base.__name__.split('.')[-1] == 'LoggedEvent':
return aClass
else:
return self.getRootClass(base)
def getHelpURL(self, event):
rootClass = self.getRootClass(event.__class__).__name__
rootClass = rootClass.split('.')[-1]
rootClass = rootClass.lower()
# messageClass = self.getMessageClass(event).__name__.split('.')[-1]
messageClass = event.__class__.__name__.split('.')[-1]
return DOCSURL + '/' + rootClass + '/' + messageClass
def mostSeriousClass(self):
ms=0
for event in self.data:
level = -1
if isinstance(event,Info): level = 1
if isinstance(event,Warning): level = 2
if isinstance(event,Error): level = 3
ms = max(ms, level)
return [None, Info, Warning, Error][ms]
def header(self):
return '<ul>'
def footer(self):
return '</ul>'
def format(self, event):
if event.params.has_key('line'):
line = event.params['line']
if line >= len(self.rawdata.split('\n')):
# For some odd reason, UnicodeErrors tend to trigger a bug
# in the SAX parser that misrepresents the current line number.
# We try to capture the last known good line number/column as
# we go along, and now it's time to fall back to that.
line = event.params['line'] = event.params.get('backupline',0)
column = event.params['column'] = event.params.get('backupcolumn',0)
column = event.params['column']
codeFragment = self.rawdata.split('\n')[line-1]
markerColumn = column
if column > self.FRAGMENTLEN:
codeFragment = '... ' + codeFragment[column-(self.FRAGMENTLEN/2):]
markerColumn = 5 + (self.FRAGMENTLEN/2)
if len(codeFragment) > self.FRAGMENTLEN:
codeFragment = codeFragment[:(self.FRAGMENTLEN-4)] + ' ...'
else:
codeFragment = ''
line = None
markerColumn = None
html = escapeAndMark(codeFragment)
rc = u'<li><p>'
if line:
rc += u'''<a href="#l%s">''' % line
rc += u'''%s</a>, ''' % self.getLine(event)
rc += u'''%s: ''' % self.getColumn(event)
if 'value' in event.params:
rc += u'''<span class="message">%s: <code>%s</code></span>''' % (escape(self.getMessage(event)), escape(event.params['value']))
else:
rc += u'''<span class="message">%s</span>''' % escape(self.getMessage(event))
rc += u'''%s ''' % self.getCount(event)
rc += u'''[<a title="more information about this error" href="%s.html">help</a>]</p>''' % self.getHelpURL(event)
rc += u'''<blockquote><pre>''' + html + '''<br />'''
if markerColumn:
rc += u' ' * markerColumn
rc += u'''<span class="marker">^</span>'''
rc += u'</pre></blockquote></li>'
return rc
| {
"repo_name": "manderson23/NewsBlur",
"path": "vendor/feedvalidator/formatter/text_html.py",
"copies": "16",
"size": "3945",
"license": "mit",
"hash": -6186030426157160000,
"line_mean": 33.9115044248,
"line_max": 133,
"alpha_frac": 0.5972116603,
"autogenerated": false,
"ratio": 3.3920894239036974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""$Id: text_xml.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
"""Output class for xml output"""
from base import BaseFormatter
from feedvalidator.logging import *
import feedvalidator
def xmlEncode(value):
value = value.replace('&', '&')
value = value.replace('<', '<')
value = value.replace('>', '>')
value = value.replace('"', '"')
value = value.replace("'", ''')
return value
class Formatter(BaseFormatter):
def format(self, event):
params = event.params
params['type'] = event.__class__.__name__
params['text'] = self.getMessage(event)
# determine the level of severity
level = 'unknown'
if isinstance(event,Info): level = 'info'
if isinstance(event,Warning): level = 'warning'
if isinstance(event,Error): level = 'error'
params['level'] = level
# organize fixed elements into a known order
order = params.keys()
order.sort()
for key in ['msgcount', 'text', 'column', 'line', 'type', 'level']:
if key in order:
order.remove(key)
order.insert(0,key)
# output the elements
result = "<%s>\n" % level
for key in order:
value = xmlEncode(str(params[key]))
            pub_key = key
            if key == "backupcolumn":
                pub_key = "column"
            elif key == "backupline":
                pub_key = "line"
            result = result + (" <%s>%s</%s>\n" % (pub_key, value, pub_key))
result = result + "</%s>\n" % level
return result
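def _test_xmlEncode():
    # Editor's illustrative sketch (not part of the original module):
    # xmlEncode escapes the five XML-special characters; '&' is replaced
    # first so that the entities it produces are not themselves re-escaped.
    assert xmlEncode('<&>"\'') == '&lt;&amp;&gt;&quot;&#39;'

if __name__ == '__main__':
    _test_xmlEncode()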
| {
"repo_name": "waltharius/NewsBlur",
"path": "vendor/feedvalidator/formatter/text_xml.py",
"copies": "16",
"size": "1709",
"license": "mit",
"hash": 6056413344371686000,
"line_mean": 29.5178571429,
"line_max": 94,
"alpha_frac": 0.6003510825,
"autogenerated": false,
"ratio": 3.438631790744467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015640785303048264,
"num_lines": 56
} |
# $Id: tftp.py 23 2006-11-08 15:45:33Z dugsong $
"""Trivial File Transfer Protocol."""
import struct
import dpkt
# Opcodes
OP_RRQ = 1 # read request
OP_WRQ = 2 # write request
OP_DATA = 3 # data packet
OP_ACK = 4 # acknowledgment
OP_ERR = 5 # error code
# Error codes
EUNDEF = 0 # not defined
ENOTFOUND = 1 # file not found
EACCESS = 2 # access violation
ENOSPACE = 3 # disk full or allocation exceeded
EBADOP = 4 # illegal TFTP operation
EBADID = 5 # unknown transfer ID
EEXISTS = 6 # file already exists
ENOUSER = 7 # no such user
class TFTP(dpkt.Packet):
__hdr__ = (('opcode', 'H', 1), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.opcode in (OP_RRQ, OP_WRQ):
l = self.data.split('\x00')
self.filename = l[0]
self.mode = l[1]
self.data = ''
elif self.opcode in (OP_DATA, OP_ACK):
            self.block = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
elif self.opcode == OP_ERR:
            self.errcode = struct.unpack('>H', self.data[:2])[0]
self.errmsg = self.data[2:].split('\x00')[0]
self.data = ''
def __len__(self):
return len(str(self))
def __str__(self):
if self.opcode in (OP_RRQ, OP_WRQ):
s = '%s\x00%s\x00' % (self.filename, self.mode)
elif self.opcode in (OP_DATA, OP_ACK):
s = struct.pack('>H', self.block)
elif self.opcode == OP_ERR:
s = struct.pack('>H', self.errcode) + ('%s\x00' % self.errmsg)
else:
s = ''
return self.pack_hdr() + s + self.data
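def test_tftp():
    # Editor's illustrative sketch (assumed packet bytes, not part of the
    # original module): parse a read request (RRQ) for 'rfc1350.txt' in
    # octet mode; the 2-byte opcode is packed big-endian by dpkt.
    buf = '\x00\x01' + 'rfc1350.txt\x00octet\x00'
    t = TFTP(buf)
    assert t.opcode == OP_RRQ
    assert t.filename == 'rfc1350.txt'
    assert t.mode == 'octet'
    # re-serializing should round-trip to the original bytes
    assert str(t) == buf

if __name__ == '__main__':
    test_tftp()
    print 'Tests Successful...'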
| {
"repo_name": "somethingnew2-0/CS642-HW2",
"path": "dpkt/tftp.py",
"copies": "15",
"size": "1718",
"license": "mit",
"hash": 8157684574969928000,
"line_mean": 30.2363636364,
"line_max": 74,
"alpha_frac": 0.5279394645,
"autogenerated": false,
"ratio": 3.1465201465201464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011383082045086299,
"num_lines": 55
} |
# $Id: TimedeltaField.py 638 2009-10-09 13:06:17Z tguettler $
# $HeadURL: svn+ssh://svnserver/svn/djangotools/trunk/dbfields/TimedeltaField.py $
# http://www.djangosnippets.org/snippets/1060/
# Python
import datetime
# Django
from django import forms
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
SECS_PER_DAY=3600*24
#south migration patch
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^util\.snippets\.TimedeltaField\.TimedeltaField"])
except ImportError:
pass
#end of south migration patch
class TimedeltaField(models.Field):
u'''
Store Python's datetime.timedelta in an integer column.
    Most database systems only support 32-bit integers by default.
'''
__metaclass__=models.SubfieldBase
def __init__(self, *args, **kwargs):
super(TimedeltaField, self).__init__(*args, **kwargs)
def to_python(self, value):
if (value is None) or str(value) == '':
return None
if isinstance(value, datetime.timedelta):
return value
if not isinstance(value, (int, long)):
value = 0
try:
value = long(value)
except:
pass
return datetime.timedelta(seconds=value)
def get_internal_type(self):
return 'IntegerField'
def get_db_prep_lookup(self, lookup_type, value):
raise NotImplementedError() # SQL WHERE
def get_db_prep_save(self, value):
if (value is None) or isinstance(value, (int, long)):
return value
return SECS_PER_DAY*value.days+value.seconds
def formfield(self, *args, **kwargs):
defaults={'form_class': TimedeltaFormField}
defaults.update(kwargs)
return super(TimedeltaField, self).formfield(*args, **defaults)
class TimedeltaFormField(forms.Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
}
def __init__(self, *args, **kwargs):
defaults={'widget': TimedeltaWidget}
defaults.update(kwargs)
super(TimedeltaFormField, self).__init__(*args, **defaults)
def clean(self, value):
# value comes from Timedelta.Widget.value_from_datadict(): tuple of strings
super(TimedeltaFormField, self).clean(value)
assert len(value)==len(self.widget.inputs), (value, self.widget.inputs)
i=0
for value, multiply in zip(value, self.widget.multiply):
try:
i+=int(value)*multiply
            except (ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid'])
return i
def to_python(self, value):
if (value is None) or str(value) == '':
return None
if isinstance(value, datetime.timedelta):
return value
if isinstance(value, (tuple, list) ):
assert len(value)==len(self.widget.inputs), (value, self.widget.inputs)
i=0
for value, multiply in zip(value, self.widget.multiply):
try:
i+=int(value)*multiply
                except (ValueError, TypeError):
pass
return datetime.timedelta( 0,i )
if isinstance(value, dict ):
return datetime.timedelta(**value)
if not isinstance(value, (int, long)):
value = 0
try:
value = long(value)
except:
pass
return datetime.timedelta(seconds=value)
class TimedeltaWidget(forms.Widget):
INPUTS=['days', 'hours', 'minutes', 'seconds']
INPUTNAMES=[_('Days:'), _('Hours:'), _('Minutes:'), _('Seconds:')]
MULTIPLY=[60*60*24, 60*60, 60, 1]
def __init__(self, attrs=None):
self.widgets=[]
if not attrs:
attrs={}
inputs=attrs.get('inputs', self.INPUTS)
inputnames=attrs.get('inputnames', self.INPUTNAMES)
multiply=[]
for input in inputs:
            assert input in self.INPUTS, (input, self.INPUTS)
final_attrs = {'class':'timedelta_'+input}
final_attrs.update(attrs)
self.widgets.append(forms.TextInput(attrs=final_attrs))
multiply.append(self.MULTIPLY[self.INPUTS.index(input)])
self.inputs=inputs
self.inputnames=inputnames
self.multiply=multiply
super(TimedeltaWidget, self).__init__(attrs)
def render(self, name, value, attrs):
if value is None:
values=[0 for i in self.inputs]
elif isinstance(value, datetime.timedelta):
values=split_seconds(value.days*SECS_PER_DAY+value.seconds, self.inputs, self.multiply)
elif isinstance(value, (int, long)):
# initial data from model
values=split_seconds(value, self.inputs, self.multiply)
else:
assert isinstance(value, tuple), (value, type(value))
assert len(value)==len(self.inputs), (value, self.inputs)
values=value
id=attrs.pop('id')
assert not attrs, attrs
rendered=[]
for input, inputname, widget, val in zip(self.inputs, self.inputnames, self.widgets, values):
rendered.append(u'%s %s' % (inputname, widget.render('%s_%s' % (name, input), val)))
return mark_safe('<p class="timedelta" id="%s">%s</p>' % (id, ' '.join(rendered)))
def value_from_datadict(self, data, files, name):
# Don't throw ValidationError here, just return a tuple of strings.
ret=[]
for input, multi in zip(self.inputs, self.multiply):
ret.append(data.get('%s_%s' % (name, input), 0))
return tuple(ret)
def _has_changed(self, initial_value, data_value):
# data_value comes from value_from_datadict(): A tuple of strings.
if initial_value is None:
initial_value = datetime.timedelta(0)
print "HAS CHANGED:",initial_value, data_value
assert isinstance(initial_value, datetime.timedelta), initial_value
initial=tuple([unicode(i) for i in split_seconds(initial_value.days*SECS_PER_DAY+initial_value.seconds, self.inputs, self.multiply)])
assert len(initial)==len(data_value)
#assert False, (initial, data_value)
return bool(initial!=data_value)
def main():
assert split_seconds(1000000)==[11, 13, 46, 40]
field=TimedeltaField()
td=datetime.timedelta(days=10, seconds=11)
s=field.get_db_prep_save(td)
assert isinstance(s, (int, long)), (s, type(s))
td_again=field.to_python(s)
assert td==td_again, (td, td_again)
td=datetime.timedelta(seconds=11)
s=field.get_db_prep_save(td)
td_again=field.to_python(s)
assert td==td_again, (td, td_again)
field=TimedeltaFormField()
assert field.widget._has_changed(datetime.timedelta(seconds=0), (u'0', u'0', u'0', u'0',)) is False
assert field.widget._has_changed(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4), (u'1', u'2', u'3', u'4',)) is False
print "unittest OK"
def split_seconds(secs, inputs=TimedeltaWidget.INPUTS, multiply=TimedeltaWidget.MULTIPLY):
ret=[]
for input, multi in zip(inputs, multiply):
count, secs = divmod(secs, multi)
ret.append(count)
return ret
if __name__=='__main__':
main()
| {
"repo_name": "TigerND/dtx-core",
"path": "src/utils/snippets/TimedeltaField.py",
"copies": "1",
"size": "7378",
"license": "mit",
"hash": 277049678864985600,
"line_mean": 36.0753768844,
"line_max": 141,
"alpha_frac": 0.6150718352,
"autogenerated": false,
"ratio": 3.82279792746114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883406855601291,
"avg_score": 0.010892581411969882,
"num_lines": 199
} |
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
# @author Shawn Schaffert <sms@eecs.berkeley.edu>
import threading
class Timer( object ) :
def __init__( self , callback=None , period=0 , numFiring=0 , waitTime=0 ) :
self.period = period #must be >= 0
self.waitTime = waitTime #must be >=0
self.numFiring = numFiring # 0 = forever, 1 = one-shot , 2+ = finite repeats
self.callback = callback
    def __fireNext( self ) :
        # threading.Timer.start() returns None, so assigning its return value
        # (as the original code did) always left self.timer as None; keep the
        # Timer object itself so it remains referenceable.
        if self.numFiring == 0 :
            self.timer = threading.Timer( self.period , self.__callback )
            self.timer.start()
        elif self.remainingFirings == 0 :
            self.timer = None
        else :
            self.timer = threading.Timer( self.period , self.__callback )
            self.timer.start()
            self.remainingFirings -= 1
def __callback( self ) :
if self.stopTimer :
self.timer = None
else :
self.__fireNext()
if self.callback:
self.callback()
def __waitOver( self ) :
self.__fireNext()
def start( self ) :
self.timer = None
self.remainingFirings = self.numFiring
self.stopTimer = False
if self.waitTime > 0 :
            self.timer = threading.Timer( self.waitTime , self.__waitOver )
            self.timer.start()
else :
self.__fireNext()
def cancel( self ) :
self.stopTimer = True
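if __name__ == "__main__" :
    # Editor's illustrative sketch (assumed usage, not part of the original
    # file): fire a callback five times at 0.5 s intervals after a 1 s wait.
    import time
    fired = []
    def tick() :
        fired.append( time.time() )
    t = Timer( callback=tick , period=0.5 , numFiring=5 , waitTime=1 )
    t.start()
    time.sleep( 5 )
    t.cancel()
    assert len( fired ) == 5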
| {
"repo_name": "fresskarma/tinyos-1.x",
"path": "tools/python/pytos/util/Timer.py",
"copies": "1",
"size": "2324",
"license": "bsd-3-clause",
"hash": 5328570893840340000,
"line_mean": 35.3125,
"line_max": 81,
"alpha_frac": 0.6940619621,
"autogenerated": false,
"ratio": 3.4947368421052634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4688798804205263,
"avg_score": null,
"num_lines": null
} |
# $Id: tns.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Transparent Network Substrate."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
class TNS(dpkt.Packet):
"""Transparent Network Substrate.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of TNS.
TODO.
"""
__hdr__ = (
('length', 'H', 0),
('pktsum', 'H', 0),
('type', 'B', 0),
('rsvd', 'B', 0),
('hdrsum', 'H', 0),
('msg', '0s', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.length - self.__hdr_len__
if n > len(self.data):
raise dpkt.NeedData('short message (missing %d bytes)' %
(n - len(self.data)))
self.msg = self.data[:n]
self.data = self.data[n:]
def test_tns():
s = (b'\x00\x23\x00\x00\x01\x00\x00\x00\x01\x34\x01\x2c\x00\x00\x08\x00\x7f'
b'\xff\x4f\x98\x00\x00\x00\x01\x00\x01\x00\x22\x00\x00\x00\x00\x01\x01X')
t = TNS(s)
assert t.msg.startswith(b'\x01\x34')
# test a truncated packet
try:
t = TNS(s[:-10])
except dpkt.NeedData:
pass
if __name__ == '__main__':
test_tns()
print('Tests Successful...')
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/tns.py",
"copies": "3",
"size": "1324",
"license": "bsd-3-clause",
"hash": -8422618580284051000,
"line_mean": 23.0727272727,
"line_max": 82,
"alpha_frac": 0.5196374622,
"autogenerated": false,
"ratio": 2.8230277185501067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840447556042522,
"avg_score": 0.0004435249415169094,
"num_lines": 55
} |
# $Id: tns.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Transparent Network Substrate."""
import dpkt
class TNS(dpkt.Packet):
__hdr__ = (
('length', 'H', 0),
('pktsum', 'H', 0),
('type', 'B', 0),
('rsvd', 'B', 0),
('hdrsum', 'H', 0),
('msg', '0s', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.length - self.__hdr_len__
if n > len(self.data):
raise dpkt.NeedData('short message (missing %d bytes)' %
(n - len(self.data)))
self.msg = self.data[:n]
self.data = self.data[n:]
def test_tns():
s = ('\x00\x23\x00\x00\x01\x00\x00\x00\x01\x34\x01\x2c\x00\x00\x08\x00\x7f'
'\xff\x4f\x98\x00\x00\x00\x01\x00\x01\x00\x22\x00\x00\x00\x00\x01\x01X')
t = TNS(s)
assert t.msg.startswith('\x01\x34')
# test a truncated packet
try:
t = TNS(s[:-10])
except dpkt.NeedData:
pass
if __name__ == '__main__':
test_tns()
print 'Tests Successful...'
| {
"repo_name": "hexcap/dpkt",
"path": "dpkt/tns.py",
"copies": "6",
"size": "1079",
"license": "bsd-3-clause",
"hash": -8576773103628848000,
"line_mean": 23.5227272727,
"line_max": 81,
"alpha_frac": 0.4958294717,
"autogenerated": false,
"ratio": 2.6381418092909534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6133971280990953,
"avg_score": null,
"num_lines": null
} |
"""A button widget, which can retain its state."""
from ButtonBase import ButtonBase
from Label import Label
from Constants import *
import base
class ToggleButton (ButtonBase):
"""ToggleButton (text) -> ToggleButton
A button widget class, which can retain its state.
The default ToggleButton widget looks and behaves usually the same
as the Button widget except that it will retain its state upon
clicks.
The state of the ToggleButton can be set with the 'active' attribute
or set_active() method. If the ToggleButton is active, the 'state'
attribute will be set to STATE_ACTIVE by default and will be reset,
if the ToggleButton is not active anymore.
toggle.active = True
toggle.set_active (False)
The ToggleButton supports different border types by setting its
'border' attribute to a valid value of the BORDER_TYPES constants.
toggle.border = BORDER_SUNKEN
toggle.set_border (BORDER_SUNKEN)
Default action (invoked by activate()):
The Button emulates a SIG_TOGGLED event and runs the connected
callbacks.
Mnemonic action (invoked by activate_mnemonic()):
See the Button class.
Signals:
SIG_TOGGLED - Invoked, when the ToggleButton is toggled.
Attributes:
active - The current state of the ToggleButton as boolean.
border - The border style to set for the ToggleButton.
text - The text to display on the ToggleButton.
"""
def __init__ (self, text=None):
ButtonBase.__init__ (self)
self._border = BORDER_RAISED
# Internal click handler
self.__click = False
self._active = False
# The ToggleButton emits a 'toggled' event.
self._signals[SIG_TOGGLED] = []
self.set_text (text)
def set_text (self, text=None):
"""T.set_text (...) -> None
Sets the text to display on the ToggleButton.
Sets the text to display on the ToggleButton by referring to the
'text' attribute of its child Label.
"""
if text != None:
if self.child:
self.child.set_text (text)
else:
self.child = Label (text)
else:
self.child = None
def get_text (self):
"""T.get_text () -> string
Returns the set text of the ToggleButton.
Returns the text set on the Label of the ToggleButton.
"""
if self.child:
return self.child.text
return ""
def set_border (self, border):
"""T.set_border (...) -> None
Sets the border type to be used by the ToggleButton.
Raises a ValueError, if the passed argument is not a value from
BORDER_TYPES
"""
if border not in BORDER_TYPES:
raise ValueError ("border must be a value from BORDER_TYPES")
self._border = border
self.dirty = True
def set_child (self, child=None):
"""B.set_child (...) -> None
Sets the Label to display on the Button.
Creates a parent-child relationship from the Button to a Label
and causes the Label to set its mnemonic widget to the Button.
Raises a TypeError, if the passed argument does not inherit
from the Label class.
"""
self.lock ()
if child and not isinstance (child, Label):
raise TypeError ("child must inherit from Label")
ButtonBase.set_child (self, child)
if child:
child.set_widget (self)
if not child.style:
child.style = self.style or \
base.GlobalStyle.get_style (self.__class__)
self.unlock ()
def set_state (self, state):
"""T.set_state (...) -> None
Sets the state of the ToggleButton.
Sets the state of the ToggleButton and causes its child to set
its state to the same value.
"""
if (self.active and (state != STATE_ACTIVE)) or (self.state == state):
return
self.lock ()
if self.child:
self.child.state = state
ButtonBase.set_state (self, state)
self.unlock ()
def set_active (self, active):
"""T.set_active (...) -> None
Sets the state of the ToggleButton.
"""
if active:
if not self._active:
self._active = True
if self.sensitive:
self.state = STATE_ACTIVE
# Enforce an update here, because the state might have
# been already modified in the notify() method.
self.dirty = True
else:
if self._active:
self._active = False
if self.sensitive:
self.state = STATE_NORMAL
# Enforce an update here, because the state might have
# been already modified in the notify() method.
self.dirty = True
def activate (self):
"""T.activate () -> None
Activates the ToggleButton default action.
Activates the Button default action. This usually means toggling
the button, emulated by inverting the 'active' attribute and
running the attached callbacks for the SIG_TOGGLED signal.
"""
self.lock ()
if not self.sensitive:
return
self.focus = True
self.set_active (not self.active)
self.unlock ()
self.run_signal_handlers (SIG_TOGGLED)
def notify (self, event):
"""T.notify (event) -> None
Notifies the ToggleButton about an event.
"""
if not self.sensitive:
return
if event.signal in SIGNALS_MOUSE:
eventarea = self.rect_to_client ()
if event.signal == SIG_MOUSEDOWN:
if eventarea.collidepoint (event.data.pos):
self.focus = True
# The button only acts upon left clicks.
if event.data.button == 1:
self.__click = True
self.state = STATE_ACTIVE
self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
event.handled = True
elif event.signal == SIG_MOUSEUP:
if eventarea.collidepoint (event.data.pos):
self.run_signal_handlers (SIG_MOUSEUP, event.data)
if event.data.button == 1:
if self.__click:
# The usual order for a ToggleButton: get a
# click and toggle state upon it.
self.run_signal_handlers (SIG_CLICKED)
self.set_active (not self.active)
self.run_signal_handlers (SIG_TOGGLED)
if not self.active:
self.state = STATE_ENTERED
event.handled = True
elif (event.data.button == 1) and self.__click and \
not self.active:
# Only a half click was made, reset the state of the
# ToggleButton.
self.state = STATE_NORMAL
elif event.signal == SIG_MOUSEMOVE:
ButtonBase.notify (self, event)
else:
# Any other event will be escalated to the parent(s).
ButtonBase.notify (self, event)
def draw_bg (self):
"""T.draw () -> Surface
Draws the ToggleButton background surface and returns it.
Creates the visible surface of the ToggleButton and returns it
to the caller.
"""
return base.GlobalStyle.engine.draw_button (self)
def draw (self):
"""R.draw () -> None
Draws the ToggleButton surface and places its Label on it.
"""
ButtonBase.draw (self)
if self.child:
self.child.center = self.image.get_rect ().center
self.image.blit (self.child.image, self.child.rect)
active = property (lambda self: self._active,
lambda self, var: self.set_active (var),
doc = "The state of the ToggleButton.")
text = property (lambda self: self.get_text (),
lambda self, var: self.set_text (var),
doc = "The text of the ToggleButton.")
border = property (lambda self: self._border,
lambda self, var: self.set_border (var),
doc = "The border style to set for the ToggleButton.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/ToggleButton.py",
"copies": "1",
"size": "10058",
"license": "bsd-2-clause",
"hash": -2289239302563515400,
"line_mean": 35.4420289855,
"line_max": 78,
"alpha_frac": 0.5896798568,
"autogenerated": false,
"ratio": 4.498211091234347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587890948034346,
"avg_score": null,
"num_lines": null
} |
"""Tooltip Window class."""
from BaseWidget import BaseWidget
from Constants import *
from StyleInformation import StyleInformation
import base
class TooltipWindow (BaseWidget):
"""TooltipWindow (text) -> TooltipWindow
A widget class that displays a line of text using a certain color.
The TooltipWindow widgets is able to display a short amount of text.
It is a completely non-interactive widget suitable for tooltip support
and notification messages.
The text to display on the TooltipWindow can be set through the 'text'
attribute or set_text() method.
window.text = 'A text to display'
window.set_text ('A text to display')
The 'padding' attribute and set_padding() method are used to place a
certain amount of pixels between the text and the outer edges of the
TooltipWindow.
window.padding = 10
window.set_padding (10)
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
text - The text to display on the TooltipWindow.
padding - Additional padding between text and borders. Default is 2.
"""
def __init__ (self, text):
BaseWidget.__init__ (self)
self._padding = 2
self._text = None
self.set_text (text)
def set_focus (self, focus=True):
"""T.set_focus (...) -> bool
Overrides the default widget input focus.
The TooltipWindow cannot be focused by default, thus this method
always returns False and does not do anything.
"""
return False
def set_padding (self, padding):
"""T.set_padding (...) -> None
Sets the padding between the edges and text of the TooltipWindow.
The padding value is the amount of pixels to place between the
edges of the TooltipWindow and the displayed text.
Raises a TypeError, if the passed argument is not a positive
integer.
Note: If the 'size' attribute is set, it can influence the
visible space between the text and the edges. That does not
    mean that any padding is set.
"""
if (type (padding) != int) or (padding < 0):
raise TypeError ("padding must be a positive integer")
self._padding = padding
self.dirty = True
def set_text (self, text):
"""T.set_text (...) -> None
Sets the text of the TooltipWindow to the passed argument.
Raises a TypeError, if the passed argument is not a string or
unicode.
"""
if type (text) not in (str, unicode):
raise TypeError ("text must be a string or unicode")
self._text = text
self.dirty = True
def draw_bg (self):
"""T.draw_bg () -> Surface
Draws the background surface of the TooltipWindow and returns it.
"""
return base.GlobalStyle.engine.draw_tooltipwindow (self)
text = property (lambda self: self._text,
lambda self, var: self.set_text (var),
doc = "The text to display on the TooltipWindow.")
padding = property (lambda self: self._padding,
lambda self, var: self.set_padding (var),
doc = "Additional padding between text and borders.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/TooltipWindow.py",
"copies": "1",
"size": "4749",
"license": "bsd-2-clause",
"hash": -2738372936177544000,
"line_mean": 36.6904761905,
"line_max": 78,
"alpha_frac": 0.6727732154,
"autogenerated": false,
"ratio": 4.467544684854186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5640317900254186,
"avg_score": null,
"num_lines": null
} |
# $Id: Train.py 36 2010-01-14 19:20:52Z tlev $
# -*- coding: iso-8859-1 -*-
"""
Created on 20 Oct 2009
Original code written by Odd A. Hjelkrem; it was later changed by Tomas Levin by segmenting it into
functions and by switching to numerical integration to calculate energy in the acceleration phase.
The format of the input data file was also changed so that the calculation of CR is done within the script and not outside.
Energy efficiency is included as a last step. Values are stored in the train data file: ./data/train.csv
For diesel trains there is one factor for all losses from fuel to wheels; for electric trains two factors are used, one for losses
from the pantograph to the wheels and another for losses in the catenary system.
@author: Levin and Hjelkrem
"""
import SEMBA as S
import sys
#Variable for turning on DEBUG printing
#DEBUG = True
DEBUG = False
def CreateTrain():
S.load_Train()
from math import sqrt
def calcCr(speed, massLoco, massWagons, massTrain, axels, Fsl, Csv, C1, C2,Fat ):
""" Calculation of Cr
    Data is based on documentation in ARTEMIS deliverable D7a and D7 part b,
    D7A page 28-29, D7 part B page 30-33.
    There are inaccuracies between the two reports when it comes to these equations.
D7a seems to have the correct equations based on comparison of text and examples.
The equations from D7a are used
"""
refSpeed = 27.778 # reference speed in m/s
Fsv = float(Csv)+(Fat*axels)/(massTrain*S.g)
#print Fsv
C0 = (float(Fsl)*float(massLoco)+float(Fsv)*float(massWagons))/float(massTrain)
#print C0
Cr = C0+C1*(float(speed)/float(refSpeed))+C2*(pow((float(speed)/float(refSpeed)),2))
#print Cr
return Cr
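# Editor's note restating calcCr in symbols (vref = 27.778 m/s, g from SEMBA):
#   Fsv = Csv + (Fat * axels) / (m_train * g)
#   C0  = (Fsl * m_loco + Fsv * m_wagons) / m_train
#   Cr  = C0 + C1 * (v / vref) + C2 * (v / vref)^2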
def calcCl(CLl,CLw,nWagons):
"""Caclulation of CL
This function assumes that wagons are homogeneous and loaded, factors for
locomotive and wagon resistance.
CLl Loco -read from file
CLw Wagon -read from file
nWagons -number of wagons read from file
"""
CL=float(CLl)+float(CLw)*float(nWagons)
return CL
def calcAccEnergy(initialSpeed,topSpeed, acceleration,distance,Cl,area,massLoco,massWagons,grade,Csv,C1,C2,Fsl,axels):
""" Calculate Energy usage in acceleration phase
    Calculates energy usage in the acceleration phase; it assumes a constant gradient and a
    constant acceleration over the segment. Numeric integration is used with 1 meter resolution:
    rolling and air resistance are calculated for every meter and then summed up. The equation
    used is found in ARTEMIS deliverable D7a, page 13. The user needs to calculate the needed
    acceleration distance. The result is energy in megajoules (MJ).
"""
#rho = 1.2 # kg/m^3 The Air density
s=1 # distance increment in integration, one meter used
v0=initialSpeed
v=0
a= acceleration
calcSpeed =[]
calcDistance =[]
massTrain = float(massLoco) + float(massWagons)
EnergyUsed=[]
#calcSpeed.append(0)
#calcDistance.append(0)
for i in range(0,int(distance)):
v=sqrt((v0*v0)+(2*a*s))
if v>topSpeed:
v=topSpeed
v0=v
Cr=calcCr(v, massLoco, massWagons, massTrain, axels, Fsl, Csv, C1, C2,100)
ERoll = massTrain*(Cr*S.g+S.g*grade+acceleration)
EAir = 0.5*S.rho*Cl*area*pow(v,2)
E = (ERoll + EAir)/ 1000000.0 # Convert to MJ
EnergyUsed.append(E)
calcSpeed.append(v0)
calcDistance.append(i)
time =(v-initialSpeed)/a
print "Seconds used in acceleration phase: ", time
E = sum(EnergyUsed)
return E,calcDistance, calcSpeed
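# Editor's worked check (assumed values, for illustration only): starting from
# rest with a = 0.5 m/s^2, one 1 m integration step gives
# v = sqrt(0**2 + 2*0.5*1) = 1.0 m/s; calcAccEnergy repeats this meter by
# meter, capping v at topSpeed, and sums the per-meter energy terms.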
def calcConstEnergy(speed,distance,Cl,area,massLoco,massWagons,grade,Csv,C1,C2,Fsl,axels):
""" Calculate energy usage along a leg with constant speed
Equation used is found in ARTEMIS deliverable D7a
page 13.
Result is in Mega Joules
"""
if speed==0:
distance=0
massTrain = massLoco + massWagons
Cr=calcCr(speed, massLoco, massWagons, massTrain, axels, Fsl, Csv, C1, C2,100)
ERoll = massTrain*(Cr*S.g+S.g*grade)
EAir = 0.5*S.rho*Cl*area*pow(speed,2)
E = (ERoll+EAir)*distance/1000000 #result in Mega Joules
calcSpeed=[]
calcDistance=[]
for i in range(1,int(distance)):
calcSpeed.append(speed)
calcDistance.append(i)
return E, calcDistance,calcSpeed
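# Hedged example with the same illustrative resistance factors as above:
# energy for 5 km at a constant 20 m/s on level track.
def _exampleConstEnergy():
    return calcConstEnergy(20.0, 5000, 6.0, 10.0, 115000, 858880,
                           0.0, 0.0006, 0.0005, 0.0006, 0.004, 117)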
def calcDeccelerationDistance(startSpeed,endSpeed,acc):
calcSpeed=[]
calcDistance=[]
distance = (pow(endSpeed,2)-pow(startSpeed,2))/(2*acc)
s=1 #resolution is 1 meter
v0=startSpeed
for i in range(0,int(distance-0.5)):
v=sqrt((v0*v0)+(2*acc*s))
v0=v
calcSpeed.append(v0)
calcDistance.append(i)
return calcDistance, calcSpeed
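# Worked example: braking from 20 m/s to rest at -0.5 m/s^2 needs
# (0 - 20^2) / (2 * -0.5) = 400 m; the returned lists trace the speed
# profile metre by metre.
def _exampleDecDistance():
    return calcDeccelerationDistance(20.0, 0.0, -0.5)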
def calcLink(length,     #The total length of the link to be calculated
             entrySpeed, # The speed when the train enters the link
             avgSpeed,   # The wanted average speed on the link
             nStop,      # The number of stops on the link
             grade,      # The gradient in the direction of travel
             acc,        # The requested acceleration in m/s^2
             decBreak,   # The requested deceleration when braking
             trainID):   # Train identification, found in data file
"""Function to calculate distance in three main phases
Entry phase: adjustment from previous link, 3 sub phases:
* Decelerate from previous link
* Constant speed previous link
* Accelerate from previous link
Stoping phase: distance for singel and N number of stops
Constant phase: distance of the link that is traversed with constant speed
The link max speed and constant speed is higher than average speed due to time
lost in acceleration sections. The new max speed is found iteratively to give the correct
average speed on the link
Energy is reported in Mega Joule
"""
tc=[] # Create an empty list to store train data
td = S.H_TRAIN[str(trainID)] #extract the train data from the train hash as a list
Cl = calcCl(float(td[3]),float(td[4]),float(td[9]))
area =float(td[5])
massLoco = float(td[6])
taraMassWagons =(float(td[7])) # weight of all wagons without payload
massPayload =(float(td[8])) #total payload of the train
totalMassWagons =taraMassWagons+massPayload
Csv = float(td[10])
C1 =float(td[11])
C2= float(td[12])
Fsl = float(td[13])
axels =float(td[14])
ErrorMessages = []
entryPhaseDistance=0
stoppingDistance=0
ConstantPhaseDistance=0
#Iteratively loop until average speed including stops equals average speed on link
topSpeed=avgSpeed
calcAvgSpeed = 0
difference=1
EntryMode='NONE' #variable to indicate if one has to accelerate or decelerate when entering link
#while avgSpeed>calcAvgSpeed:
while difference>0:
#Find the three phase distances
if str(entrySpeed)==str(topSpeed):
entryPhaseDistance=0
elif entrySpeed<topSpeed:
entryPhaseDistance =(pow(topSpeed,2)-pow(entrySpeed,2))/(2*acc)
EntryMode='ACC'
elif entrySpeed>topSpeed:
entryPhaseDistance =(pow(entrySpeed,2)-pow(topSpeed,2))/(2*decBreak)
EntryMode='DEC'
#find the distance in the stopping phase
if nStop==0:
stoppingDistance=0 #there are no stops
elif nStop >0:
stopBreakDist= (pow(topSpeed,2))/(2*decBreak*-1)
stopAccDist =(pow(topSpeed,2))/(2*acc)
stoppingDistance=(stopBreakDist+stopAccDist)*nStop
#calculate constant speed distance
if entryPhaseDistance >0:
ConstantPhaseDistance = length - (entryPhaseDistance+stoppingDistance)
else:
ConstantPhaseDistance = length - (entryPhaseDistance*-1+stoppingDistance)
        if ConstantPhaseDistance<10:
            ErrorMessages.append('Link is too short for acceleration and stops')
            ErrorMessages.append('The constant part of the segment is too short')
            #If the constant part is too short, the algorithm for finding a new top speed will not converge.
            sys.exit("ERROR: Not enough distance for the requested accelerations\n" +
                     "too many stops or too short a distance")
#Calculate time usage in the different phases and new average speed because of this
if entryPhaseDistance >0:
tEntry=(topSpeed-entrySpeed)/acc
else:
tEntry=(topSpeed-entrySpeed)/decBreak
tStopp=((topSpeed/decBreak)*-1.0)*nStop
tStart=(topSpeed/acc)*nStop
tConstant=ConstantPhaseDistance/topSpeed
tTotal = tEntry+tStopp+tStart+tConstant
calcAvgSpeed=length/tTotal
if EntryMode=='DEC':
difference = avgSpeed - calcAvgSpeed
else:
#difference = calcAvgSpeed - avgSpeed
difference = avgSpeed - calcAvgSpeed
if difference<0.001:
difference=0.0
if calcAvgSpeed<avgSpeed:
topSpeed=topSpeed+0.05
if calcAvgSpeed>avgSpeed:
topSpeed=topSpeed-0.05
### DEBUG Statement
if DEBUG:
a=0
print "Top speed set to : ", topSpeed
if entryPhaseDistance > 0:
print "Entry acceleration distance : " + str(entryPhaseDistance)
a= entryPhaseDistance
        else:
            print "Entry deceleration distance : " + str(entryPhaseDistance*-1)
            a= entryPhaseDistance*-1
print "Stopping distance : " + str(stoppingDistance)
print "Constant distance : " + str(ConstantPhaseDistance)
print "TOTAL distance : " + str(a+stoppingDistance+ConstantPhaseDistance)
#calculate energy usage in the different phases
    #Initialization to prevent errors when no acceleration phase is present or if the link entry speed is higher than the calculated top speed
entryPhaseEnergy =[0,0,0]
singelAccEnergy =[0,0,0]
ConstantPhaseEnergy=[0,0,0]
stoppingPhaseEnergy=0
if entryPhaseDistance>0:
if entrySpeed<topSpeed: #there is a need to accelerate and energy will be used
entryPhaseEnergy= calcAccEnergy(entrySpeed,topSpeed,acc,entryPhaseDistance,Cl,area,massLoco,totalMassWagons,grade,Csv,C1,C2,Fsl,axels)
if nStop >0:
singelAccEnergy = calcAccEnergy(0,topSpeed,acc,stopAccDist,Cl,area,massLoco,totalMassWagons,grade,Csv,C1,C2,Fsl,axels)
stoppingPhaseEnergy =singelAccEnergy[0] * nStop #acc from stopping calculated for all stops
if ConstantPhaseDistance >0:
ConstantPhaseEnergy=calcConstEnergy(topSpeed,ConstantPhaseDistance,Cl,area,massLoco,totalMassWagons,grade,Csv,C1,C2,Fsl,axels)
else:
ConstantPhaseEnergy=[0,0,0]
totalLinkEnergy = entryPhaseEnergy[0]+ singelAccEnergy[0]*nStop+ConstantPhaseEnergy[0]
#Create data for a distance speed plot.
totDistance =[]
totSpeed=[]
    if str(float(entrySpeed))!=str(float(topSpeed)) and entryPhaseDistance >0:
for i in range(0,len(entryPhaseEnergy[1])):
totDistance.append(entryPhaseEnergy[1][i])
totSpeed.append(entryPhaseEnergy[2][i])
tempEntrySpeeds=calcDeccelerationDistance(entrySpeed,topSpeed,decBreak)
    if str(float(entrySpeed))!=str(float(topSpeed)) and entryPhaseDistance <0:
for i in range(0,len(tempEntrySpeeds[0])):
totDistance.append(tempEntrySpeeds[0][i])
totSpeed.append(tempEntrySpeeds[1][i])
stoppingSpeed = calcDeccelerationDistance(topSpeed,0,decBreak)
if nStop >0:
for i in range(0,nStop):
for j in range(0,len(stoppingSpeed[0])):
totDistance.append(stoppingSpeed[0][j])
totSpeed.append(stoppingSpeed[1][j])
            #The next two lines ensure that the speed reaches 0
totDistance.append(stoppingSpeed[0][j]+0.5)
totSpeed.append(0)
for k in range(0,len(singelAccEnergy[1])):
totDistance.append(singelAccEnergy[1][k])
totSpeed.append(singelAccEnergy[2][k])
### DEBUG Statement
if DEBUG:
a=0
b=0
c=0
if entryPhaseDistance >0:
print "Entry acceleration distance length: " + str(len(entryPhaseEnergy[1]))
a= len(entryPhaseEnergy[1])
if entryPhaseDistance <0:
print "Entry deceleration distance length: " + str(len(tempEntrySpeeds[1]))
a= len(tempEntrySpeeds[1])
if nStop > 0:
print "Acceleration length : " + str(len(singelAccEnergy[1])*nStop)
print "Decceleration length : " + str(len(stoppingSpeed[1])*nStop)
b= len(singelAccEnergy[1])*nStop
c= len(stoppingSpeed[1])*nStop
print "Constant phase lenght : " + str(len(ConstantPhaseEnergy[1]))
print "Total length from energy routines : " + str(a+b+c+len(ConstantPhaseEnergy[1]))
#we have now come to the constant speed strech
for i in range(0,int(ConstantPhaseDistance)):
totDistance.append(i)
totSpeed.append(topSpeed)
    #Calculate the needed energy when losses between energy supply and wheels are included (see CalcEnergyIncludingLoss)
TOTtotalLinkEnergy = CalcEnergyIncludingLoss(totalLinkEnergy,trainID)
TOTentryPhaseEnergy= CalcEnergyIncludingLoss(entryPhaseEnergy[0],trainID)
TOTstoppingPhaseEnergy= CalcEnergyIncludingLoss(stoppingPhaseEnergy,trainID)
TOTConstantPhaseEnergy= CalcEnergyIncludingLoss(ConstantPhaseEnergy[0],trainID)
grossAvgEnergyUsage = float(TOTtotalLinkEnergy)/((float(length)/1000.0)*(float(totalMassWagons)/1000.0+float(massLoco)/1000.0))
netAvgEnergyUsage =float(TOTtotalLinkEnergy) / ((float(massPayload)/1000.0)*(float(length)/1000.0))
#Return energy usage, Total , EntryPhase, StoppingPhase , ConstantPhase
return TOTtotalLinkEnergy, TOTentryPhaseEnergy, TOTstoppingPhaseEnergy, TOTConstantPhaseEnergy, ErrorMessages, totDistance,totSpeed, grossAvgEnergyUsage, netAvgEnergyUsage
def CalcEnergyIncludingLoss(Energy,TrainID):
    td = S.H_TRAIN[str(TrainID)]
    LocomotiveType = td[1]
    EnergyWithLoss = -1 # used to make sure that the user understands that an error occurred
    #Note: the 'DIELSEL' spelling is kept as-is, since it presumably matches the entries
    #in the train data file. Both branches currently apply the single efficiency factor
    #td[15]; the separate catenary-loss factor mentioned in the module docstring is not
    #applied here.
    if LocomotiveType=='DIELSEL':
        EnergyWithLoss = float(Energy) / float(td[15])
    if LocomotiveType=='ELECTRIC':
        EnergyWithLoss = float(Energy)/float(td[15])
    return EnergyWithLoss
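# Worked example of the loss step with an illustrative efficiency factor
# (not from train.csv): an efficiency of 0.35 means 10 MJ at the wheels
# requires 10 / 0.35 = 28.6 MJ of input energy.
def _exampleLoss(energyAtWheels=10.0, efficiency=0.35):
    return energyAtWheels / efficiency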
###########____Load Data____##################
CreateTrain()
#test segment for debugging purposes
if __name__ == "__main__":
import matplotlib.pyplot as plt
"""
#calcE(0,2.7,138)
# calcCr(speed, massLoco, massWagons, massTrain, axels, Fsl, Csv, C1, C2,Fat )
#print calcCr(26.45, 115000, 314000, 429000, 30, 0.004, 0.006, 0.0005, 0.0006,100 )
#print calcCr(27.778, 123000, 400000, 523000, 20, 0.004, 0.0006, 0.0005, 0.0006,100 )
#print calcAccEnergy(0,0.15,1000,6,10,115000,858880,0.0,0.0006,0.0005,0.0006,0.004,117)
test=[]
aks=[]
for i in range(-5,5):
test.append(calcConstEnergy(10,1000,6,10,115000,858880,i/100.0,0.0006,0.0005,0.0006,0.004,117))
aks.append(i/100.0)
plt.plot(aks, test)
plt.show()
"""
"""
#Data for Berkåk - Oppdal
a = calcLink(54900, #length, 3
0.0, #entrySpeed,
22.0, #avgSpeed,
5, #nStop,
-0.000, #grade,
0.3, #acc,
-0.5, #decBreak,
1) #Train ID
"""
    #Data for Dombås - Åndalsnes
    Length = 11000
    EntrySpeed = 0
    AvgSpeed = 22
    NumberOfStops = 1
    Grade = 0
    acc = 0.3
    decBreak= -0.5
    TrainID = 1
    a=calcLink(Length,EntrySpeed,AvgSpeed,NumberOfStops,Grade,acc,decBreak,TrainID)
#a = calcLink(9089.99999999997,0,19.4289679381212,1,-0.00122112211221121,0.3,-0.5,3)
Oppover=[]
Nedover=[]
Nedover.append(calcLink(9089.99999999997,0,19.4289679381212,0,-0.00122112211221121,0.3,-0.5,3)[0])
Nedover.append(calcLink(8550.00000000001,19.4289679381212,19.4289679381212,0,-0.00164912280701754,0.3,-0.5,3)[0])
Nedover.append(calcLink(9099.99999999997,19.4289679381212,19.4289679381212,0,-0.000263736263736262,0.3,-0.5,3)[0])
Nedover.append(calcLink(10110,19.4289679381212,19.4289679381212,0,0.000148367952522255,0.3,-0.5,3)[0])
Nedover.append(calcLink(10840,19.4289679381212,19.4289679381212,0,-0.00165129151291513,0.3,-0.5,3)[0])
Nedover.append(calcLink(9109.99999999996,19.4289679381212,19.4289679381212,0,-0.00445664105378706,0.3,-0.5,3)[0])
Nedover.append(calcLink(18250,19.4289679381212,19.4289679381212,0,-0.0165095890410959,0.3,-0.5,3)[0])
Nedover.append(calcLink(13430,19.4289679381212,19.4289679381212,0,-0.0109233060312733,0.3,-0.5,3)[0])
Nedover.append(calcLink(7640.00000000004,19.4289679381212,19.4289679381212,0,-0.00801047120418844,0.3,-0.5,3)[0])
Nedover.append(calcLink(10090,19.4289679381212,19.4289679381212,0,-0.00555004955401389,0.3,-0.5,3)[0])
Nedover.append(calcLink(8029.99999999997,19.4289679381212,19.4289679381212,0,-0.000660024906600251,0.3,-0.5,3)[0])
print "Energi forbruk nedover"
for segment in Nedover:
print segment
Oppover.append(calcLink(8029.99999999997,0,17.3094056255568,0,0.000660024906600251,0.3,-0.5,3)[0])
Oppover.append(calcLink(10090,17.3094056255568,17.3094056255568,0,0.00555004955401389,0.3,-0.5,3)[0])
Oppover.append(calcLink(7640.00000000004,17.3094056255568,17.3094056255568,0,0.00801047120418844,0.3,-0.5,3)[0])
Oppover.append(calcLink(13430,17.3094056255568,17.3094056255568,0,0.0109233060312733,0.3,-0.5,3)[0])
Oppover.append(calcLink(18250,17.3094056255568,17.3094056255568,0,0.0165095890410959,0.3,-0.5,3)[0])
Oppover.append(calcLink(9109.99999999996,17.3094056255568,17.3094056255568,0,0.00445664105378706,0.3,-0.5,3)[0])
Oppover.append(calcLink(10840,17.3094056255568,17.3094056255568,0,0.00165129151291513,0.3,-0.5,3)[0])
Oppover.append(calcLink(10110,17.3094056255568,17.3094056255568,0,-0.000148367952522255,0.3,-0.5,3)[0])
Oppover.append(calcLink(9099.99999999997,17.3094056255568,17.3094056255568,0,0.000263736263736262,0.3,-0.5,3)[0])
Oppover.append(calcLink(8550.00000000001,17.3094056255568,17.3094056255568,0,0.00164912280701754,0.3,-0.5,3)[0])
Oppover.append(calcLink(9089.99999999997,17.3094056255568,17.3094056255568,0,0.00122112211221121,0.3,-0.5,3)[0])
print "Energi forbruk oppover"
for segment in Oppover:
print segment
print "testdata"
print sum(Nedover)
print "Total energy used :", a[0]
print "Entry phase energy :", a[1]
print "Stopping phase energy :", a[2]
print "Constant phase energy :", a[3]
print "The following error messages where created\n", a[4]
print '************************************************************'
    print 'Energy per tkm (gross tonnes): ', a[7]*1000, " kJ/tkm, ", a[7]*1000/3.6, " Wh/tkm"
    print 'Energy per tkm (net tonnes)  : ', a[8]*1000, " kJ/tkm, ", a[8]*1000/3.6, " Wh/tkm"
i=1
dist=[]
pltAvgSpeed=[]
for i in range(0, len(a[5])):
dist.append(i)
pltAvgSpeed.append(22.0)
#plt.axhline(linewidth=4, color='r')
plt.plot(dist,a[6])
plt.plot(dist,pltAvgSpeed)
"""
avgspd=[]
d=[]
for i in range(0,15000):
avgspd.append(22)
d.append(i)
plt.plot(d,avgspd)
"""
plt.show()
| {
"repo_name": "tomasle/semba",
"path": "SEMBA/TRAIN.py",
"copies": "1",
"size": "18348",
"license": "bsd-2-clause",
"hash": 6965378125238010000,
"line_mean": 39.6829268293,
"line_max": 173,
"alpha_frac": 0.7032919119,
"autogenerated": false,
"ratio": 3.01677079907925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.422006271097925,
"avg_score": null,
"num_lines": null
} |
"""A Renderer that plays nicely with a threadedselectreactor from
Twisted.
Many thanks to Bob Ippolito for his work on threadedselectreactor and
the pygamedemo.py he created.
Written by Ben Olsen <ben@rickstranger.com>
"""
from pygame import QUIT, event
from pygame import error as PygameError
from pygame import time as PygameTime
from ocempgui.widgets import Renderer
from ocempgui.widgets.Constants import *
class TwistedRenderer (Renderer):
"""TwistedRenderer () -> TwistedRenderer
A Renderer that allows the easy integration with Twisted.
Because Twisted's threadedselectreactor *must* be shut down before
the main loop shuts down, this Renderer will keep running until
explicitly told to stop.
Before starting the main loop, this Renderer will check to see if it
has a Twisted reactor attached to it. This is an attribute set like
any of the normal Renderer attributes:
self.reactor = reactor
If self.reactor is None (default), this will behave like a normal
Renderer. If self.reactor has been set, the QUIT signal will call
reactor.stop(), and then wait for reactor.addSystemEventTrigger to
call self.stop(). This function will then stop the main loop.
Usage
-----
Install the threadedselectreactor instead of the default reactor:
from twisted.internet.threadedselectreactor import install
install()
from twisted.internet import reactor
In the main section of your program, where you create the Renderer,
just set TwistedRenderer's reactor:
re = TwistedRenderer()
re.reactor = reactor
Everything else is handled internally by TwistedRenderer.
Attributes:
reactor - The twisted reactor attached to the TwistedRenderer.
"""
def __init__ (self):
Renderer.__init__ (self)
self._reactor = None
self._running = False
def start (self):
"""T.start () -> None
Overrides default start() to use self._running. If a reactor is
attached, interleave self.waker
"""
        self._running = True
if self._reactor != None:
self._reactor.interleave (self.waker)
self._loop()
def stop (self):
"""T.stop () -> None
Tells the internal loop to stop running.
"""
self._running = False
def set_reactor (self, reactor):
"""T.set_reactor (...) -> None
Sets the internal reactor.
"""
if not hasattr (reactor, 'interleave'):
raise AttributeError ("interleave() method not found in %s" %
reactor)
self._reactor = reactor
self._reactor.addSystemEventTrigger ('after', 'shutdown', self.stop)
def waker (self, func):
"""T.waker (...) -> None
Used in threadedselectreactor.interleave.
"""
event.post (event.Event (SIG_TWISTED, data=func))
def distribute_events (self, *events):
"""T.distribute_events (...) -> None
Overrides default distribute_events() to check for a reactor. If
a reactor is found, the QUIT signal will call reactor.stop(). If
there's no reactor attached, a QUIT signal will simply set
self._running to False.
"""
        # 'ev' avoids shadowing the pygame 'event' module imported above.
        for ev in events:
            if ev.type == QUIT:
                if self._reactor != None:
                    self._reactor.stop ()
                else:
                    self._running = False
            elif ev.type == SIG_TWISTED:
                ev.data ()
            else:
                Renderer.distribute_events (self, (ev))
        return True
def _loop (self):
"""T._loop () -> None
Overrides default _loop() so that it will not stop until
self._running is false.
"""
# Emit the tick event every 10 ms.
PygameTime.set_timer (SIG_TICK, 10)
delay = PygameTime.delay
event_get = event.get
pump = event.pump
while self._running:
pump ()
# Get events and distribute them.
events = event_get ()
if not self.distribute_events (*events):
return # QUIT event
if self.timer > 0:
delay (1000 / self.timer)
reactor = property (lambda self: self._reactor,
lambda self, var: self.set_reactor (var),
doc = "The twisted reactor attached to the Renderer.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/TwistedRenderer.py",
"copies": "1",
"size": "5915",
"license": "bsd-2-clause",
"hash": -7184643629516985000,
"line_mean": 34.6325301205,
"line_max": 78,
"alpha_frac": 0.6475063398,
"autogenerated": false,
"ratio": 4.501522070015221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5649028409815221,
"avg_score": null,
"num_lines": null
} |
"""
Transforms needed by most or all documents:
- `Decorations`: Generate a document's header & footer.
- `Messages`: Placement of system messages stored in
`nodes.document.transform_messages`.
- `TestMessages`: Like `Messages`, used on test runs.
- `FinalReferences`: Resolve remaining references.
"""
__docformat__ = 'reStructuredText'
import time
from docutils import nodes, utils
from docutils.transforms import Transform
class Decorations(Transform):
"""
Populate a document's decoration element (header, footer).
"""
default_priority = 820
def apply(self):
header_nodes = self.generate_header()
if header_nodes:
decoration = self.document.get_decoration()
header = decoration.get_header()
header.extend(header_nodes)
footer_nodes = self.generate_footer()
if footer_nodes:
decoration = self.document.get_decoration()
footer = decoration.get_footer()
footer.extend(footer_nodes)
def generate_header(self):
return None
def generate_footer(self):
# @@@ Text is hard-coded for now.
# Should be made dynamic (language-dependent).
settings = self.document.settings
if settings.generator or settings.datestamp or settings.source_link \
or settings.source_url:
text = []
if settings.source_link and settings._source \
or settings.source_url:
if settings.source_url:
source = settings.source_url
else:
source = utils.relative_path(settings._destination,
settings._source)
text.extend([
nodes.reference('', 'View document source',
refuri=source),
nodes.Text('.\n')])
if settings.datestamp:
datestamp = time.strftime(settings.datestamp, time.gmtime())
text.append(nodes.Text('Generated on: ' + datestamp + '.\n'))
if settings.generator:
text.extend([
nodes.Text('Generated by '),
nodes.reference('', 'Docutils', refuri=
'http://docutils.sourceforge.net/'),
nodes.Text(' from '),
nodes.reference('', 'reStructuredText', refuri='http://'
'docutils.sourceforge.net/rst.html'),
nodes.Text(' source.\n')])
return [nodes.paragraph('', '', *text)]
else:
return None
class ExposeInternals(Transform):
"""
Expose internal attributes if ``expose_internals`` setting is set.
"""
default_priority = 840
def not_Text(self, node):
return not isinstance(node, nodes.Text)
def apply(self):
if self.document.settings.expose_internals:
for node in self.document.traverse(self.not_Text):
for att in self.document.settings.expose_internals:
value = getattr(node, att, None)
if value is not None:
node['internal:' + att] = value
class Messages(Transform):
"""
Place any system messages generated after parsing into a dedicated section
of the document.
"""
default_priority = 860
def apply(self):
unfiltered = self.document.transform_messages
threshold = self.document.reporter.report_level
messages = []
for msg in unfiltered:
if msg['level'] >= threshold and not msg.parent:
messages.append(msg)
if messages:
section = nodes.section(classes=['system-messages'])
# @@@ get this from the language module?
section += nodes.title('', 'Docutils System Messages')
section += messages
self.document.transform_messages[:] = []
self.document += section
class FilterMessages(Transform):
"""
Remove system messages below verbosity threshold.
"""
default_priority = 870
def apply(self):
for node in self.document.traverse(nodes.system_message):
if node['level'] < self.document.reporter.report_level:
node.parent.remove(node)
class TestMessages(Transform):
"""
Append all post-parse system messages to the end of the document.
Used for testing purposes.
"""
default_priority = 880
def apply(self):
for msg in self.document.transform_messages:
if not msg.parent:
self.document += msg
class StripComments(Transform):
"""
Remove comment elements from the document tree (only if the
``strip_comments`` setting is enabled).
"""
default_priority = 740
def apply(self):
if self.document.settings.strip_comments:
for node in self.document.traverse(nodes.comment):
node.parent.remove(node)
class StripClassesAndElements(Transform):
"""
Remove from the document tree all elements with classes in
`self.document.settings.strip_elements_with_classes` and all "classes"
attribute values in `self.document.settings.strip_classes`.
"""
default_priority = 420
def apply(self):
if not (self.document.settings.strip_elements_with_classes
or self.document.settings.strip_classes):
return
# prepare dicts for lookup (not sets, for Python 2.2 compatibility):
self.strip_elements = dict(
[(key, None)
for key in (self.document.settings.strip_elements_with_classes
or [])])
self.strip_classes = dict(
[(key, None) for key in (self.document.settings.strip_classes
or [])])
for node in self.document.traverse(self.check_classes):
node.parent.remove(node)
def check_classes(self, node):
if isinstance(node, nodes.Element):
for class_value in node['classes'][:]:
if class_value in self.strip_classes:
node['classes'].remove(class_value)
if class_value in self.strip_elements:
return 1
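# Hedged usage sketch (assuming the standard docutils Transformer API): the
# transforms above are normally scheduled by readers and writers, but a
# transform can also be applied to an already-parsed document directly.
def _example_strip_comments(document):
    document.transformer.add_transform(StripComments)
    document.transformer.apply_transforms()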
| {
"repo_name": "Soya93/Extract-Refactoring",
"path": "python/helpers/py2only/docutils/transforms/universal.py",
"copies": "5",
"size": "6541",
"license": "apache-2.0",
"hash": -2266339016640107800,
"line_mean": 31.3811881188,
"line_max": 78,
"alpha_frac": 0.5801865158,
"autogenerated": false,
"ratio": 4.59985935302391,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7680045868823909,
"avg_score": null,
"num_lines": null
} |
"""$Id: uri.py 511 2006-03-07 05:19:10Z rubys $"""
# This is working code, with tests, but not yet integrated into validation.
# (Change unique in validators.py to use Uri(self.value), rather than the
# plain value.)
# Ideally, this would be part of the core Python classes.
# It's probably not ready for deployment, but having it here helps establish
# the test case as a repository for any pathological cases that people
# suggest.
from urlparse import urljoin
from urllib import quote, quote_plus, unquote, unquote_plus
from unicodedata import normalize
from codecs import lookup
import re
(enc, dec) = lookup('UTF-8')[:2]
SUBDELIMS='!$&\'()*+,;='
PCHAR='-._~' + SUBDELIMS + ':@'
GENDELIMS=':/?#[]@'
RESERVED=GENDELIMS + SUBDELIMS
default_port = {
'ftp': 21,
'telnet': 23,
'http': 80,
'gopher': 70,
'news': 119,
'nntp': 119,
'prospero': 191,
'https': 443,
'snews': 563,
'snntp': 563,
}
class BadUri(Exception):
pass
def _n(s):
return enc(normalize('NFC', dec(s)[0]))[0]
octetRe = re.compile('([^%]|%[a-fA-F0-9]{2})')
def asOctets(s):
while (s):
m = octetRe.match(s)
if not(m):
raise BadUri()
c = m.group(1)
if (c[0] == '%'):
            yield(c.upper(), chr(int(c[1:], 16)))  # percent-escape -> octet
else:
yield(c, c)
s = s[m.end(1):]
def _qnu(s,safe=''):
if s == None:
return None
# unquote{,_plus} leave high-bit octets unconverted in Unicode strings
# This conversion will, correctly, cause UnicodeEncodeError if there are
# non-ASCII characters present in the string
s = str(s)
res = ''
b = ''
for (c,x) in asOctets(s):
if x in RESERVED and x in safe:
res += quote(_n(unquote(b)), safe)
b = ''
res += c
else:
b += x
res += quote(_n(unquote(b)), safe)
return res
def _normPort(netloc,defPort):
nl = netloc.lower()
p = defPort
i = nl.find(':')
if i >= 0:
ps = nl[i + 1:]
if ps:
if not(ps.isdigit()):
return netloc
p = int(ps)
nl = nl[:i]
if nl and nl[-1] == '.' and nl.rfind('.', 0, -2) >= 0:
nl = nl[:-1]
if p != defPort:
nl = nl + ':' + str(p)
return nl
def _normAuth(auth,port):
i = auth.rfind('@')
if i >= 0:
c = auth[:i]
if c == ':':
c = ''
h = auth[i + 1:]
else:
c = None
h = auth
if c:
return c + '@' + _normPort(h,port)
else:
return _normPort(h,port)
def _normPath(p):
l = p.split(u'/')
i = 0
if l and l[0]:
i = len(l)
while i < len(l):
c = l[i]
if (c == '.'):
if i < len(l) - 1:
del l[i]
else:
l[i] = ''
elif (c == '..'):
if i < len(l) - 1:
del l[i]
else:
l[i] = ''
if i > 1 or (i > 0 and l[0]):
i -= 1
del l[i]
else:
i += 1
if l == ['']:
l = ['', '']
return u'/'.join([_qnu(c, PCHAR) for c in l])
# From RFC 2396bis, with added end-of-string marker
uriRe = re.compile('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$')
def _canonical(s):
m = uriRe.match(s)
if not(m):
raise BadUri()
# Check for a relative URI
if m.group(2) is None:
scheme = None
else:
scheme = m.group(2).lower()
if m.group(4) is None:
authority = None
p = m.group(5)
# Don't try to normalise URI references with relative paths
if scheme is None and not p.startswith('/'):
return None
if scheme == 'mailto':
# XXX From RFC 2368, mailto equivalence needs to be subtler than this
i = p.find('@')
if i > 0:
j = p.find('?')
if j < 0:
j = len(p)
p = _qnu(p[:i]) + '@' + _qnu(p[i + 1:].lower()) + _qnu(p[j:])
path = p
else:
if scheme is None or p.startswith('/'):
path = _normPath(p)
else:
path = _qnu(p, PCHAR + '/')
else:
a = m.group(4)
p = m.group(5)
if scheme in default_port:
a = _normAuth(a, default_port[scheme])
else:
a = _normAuth(a, None)
authority = a
path = _normPath(p)
query = _qnu(m.group(7), PCHAR + "/?")
fragment = _qnu(m.group(9), PCHAR + "/?")
s = u''
if scheme != None:
s += scheme + ':'
if authority != None:
s += '//' + authority
s += path
if query != None:
s += '?' + query
if fragment != None:
s += '#' + fragment
return s
class Uri:
"""A Uri wraps a string and performs equality testing according to the
rules for URI equivalence. """
def __init__(self,s):
self.s = s
self.n = _canonical(s)
def __str__(self):
return self.s
def __repr__(self):
return repr(self.s)
def __eq__(self, a):
return self.n == a.n
def canonicalForm(u):
"""Give the canonical form for a URI, so char-by-char comparisons become valid tests for equivalence."""
try:
return _canonical(u)
except BadUri:
return None
except UnicodeError:
return None
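# Hedged examples; the expected values are derived by reading _canonical
# above, not taken from an original test suite.
def _example_uri():
    # Scheme and host lowercased, default port removed, dot-segments resolved:
    print canonicalForm('HTTP://Example.COM:80/a/./b/../c')  # http://example.com/a/c
    # Uri objects compare by canonical form:
    print Uri('http://example.com/a/c') == Uri('HTTP://example.com:80/a/c')  # True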
| {
"repo_name": "Suninus/NewsBlur",
"path": "vendor/feedvalidator/uri.py",
"copies": "18",
"size": "4858",
"license": "mit",
"hash": 7089723332173912000,
"line_mean": 19.8497854077,
"line_max": 106,
"alpha_frac": 0.5317002882,
"autogenerated": false,
"ratio": 2.9388989715668483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0306009553599325,
"num_lines": 233
} |
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
# @author Shawn Schaffert <sms@eecs.berkeley.edu>
import threading
class Timer( object ) :
def __init__( self , callback=None , period=0 , numFiring=0 , waitTime=0 ) :
self.period = period #must be >= 0
self.waitTime = waitTime #must be >=0
self.numFiring = numFiring # 0 = forever, 1 = one-shot , 2+ = finite repeats
self.callback = callback
    def __fireNext( self ) :
        if self.numFiring == 0 :
            # threading.Timer.start() returns None, so create the timer first
            # and keep a reference to the Timer object itself.
            self.timer = threading.Timer( self.period , self.__callback )
            self.timer.start()
        elif self.remainingFirings == 0 :
            self.timer = None
        else :
            self.timer = threading.Timer( self.period , self.__callback )
            self.timer.start()
            self.remainingFirings -= 1
def __callback( self ) :
if self.stopTimer :
self.timer = None
else :
self.__fireNext()
if self.callback:
self.callback()
def __waitOver( self ) :
self.__fireNext()
def start( self ) :
self.timer = None
self.remainingFirings = self.numFiring
self.stopTimer = False
        if self.waitTime > 0 :
            self.timer = threading.Timer( self.waitTime , self.__waitOver )
            self.timer.start()
        else :
            self.__fireNext()
def cancel( self ) :
self.stopTimer = True
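# Hedged usage sketch (not part of the original module): fire a callback five
# times at 1 s intervals after an initial 2 s wait.
def _exampleTimer():
    def tick():
        print "tick"
    t = Timer( callback=tick , period=1.0 , numFiring=5 , waitTime=2.0 )
    t.start()
    return t  # call t.cancel() to stop before the remaining firings complete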
| {
"repo_name": "ekiwi/tinyos-1.x",
"path": "contrib/nestfe/python/pytos/Util.py",
"copies": "2",
"size": "2323",
"license": "bsd-3-clause",
"hash": -2885064478895254000,
"line_mean": 35.296875,
"line_max": 81,
"alpha_frac": 0.6939302626,
"autogenerated": false,
"ratio": 3.493233082706767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5187163345306767,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import types
import warnings
import unicodedata
from types import StringType, UnicodeType
from docutils import ApplicationError, DataError
from docutils import nodes
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced iff the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The encoding for stderr output.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.encoding = encoding
"""The character encoding for the stderr output."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if stream is None:
stream = sys.stderr
elif type(stream) in (StringType, UnicodeType):
# Leave stream untouched if it's ''.
if stream != '':
if type(stream) == StringType:
stream = open(stream, 'w')
elif type(stream) == UnicodeType:
stream = open(stream.encode(), 'w')
self.stream = stream
"""Where warning output is sent."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if stream is None:
stream = sys.stderr
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
if kwargs.has_key('base_node'):
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL):
msgtext = msg.astext().encode(self.encoding, self.error_handler)
print >>self.stream, msgtext
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
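# Hedged sketch of the observer mechanism described in the class docstring:
# attach a collector before parsing and every qualifying system_message is
# passed to it.
def _example_observer(reporter):
    collected = []
    reporter.attach_observer(collected.append)
    reporter.warning('an example warning')  # also written to the warning stream
    reporter.detach_observer(collected.append)
    return collected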
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if options.has_key(name):
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
class NameValueError(DataError): pass
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
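# Hedged example of the parsing rules above (illustrative input):
# extract_name_value('width=100 height="30 em"') returns
# [('width', '100'), ('height', '30 em')].
def _example_extract_name_value():
    return extract_name_value('width=100 height="30 em"')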
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none provided, a default set will be used.
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode.data)
if match:
textnode.data = pattern.sub(substitution, textnode.data)
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
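# Hedged example (paths are illustrative): with the common 'dir' prefix
# removed, the result climbs out of the source directory and back down to
# the target, giving '../two/target.txt'.
def _example_relative_path():
    return relative_path('dir/one/source.txt', 'dir/two/target.txt')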
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, \
'stylesheet and stylesheet_path are mutually exclusive.'
if relative_to == None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
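# Hedged round-trip example: escape2null('a\\*b') yields 'a\x00*b', and
# unescape then removes the null, leaving 'a*b'.
def _example_escape_roundtrip():
    nulled = escape2null('a\\*b')  # 'a\x00*b'
    return unescape(nulled)        # 'a*b'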
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (s/b wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
if isinstance(text, types.UnicodeType):
total = 0
for c in text:
total += east_asian_widths[unicodedata.east_asian_width(c)]
return total
else:
return len(text)
if hasattr(unicodedata, 'east_asian_width'):
column_width = east_asian_column_width
else:
column_width = len
def uniq(L):
r = []
for item in L:
if not item in r:
r.append(item)
return r
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, filename):
"""
If the dependency `filename` has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
if not filename in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
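# Hedged usage sketch: with output_file=None nothing is written anywhere and
# duplicates are silently ignored; pass '-' to echo dependencies to stdout.
def _example_dependency_list():
    deps = DependencyList(None, ['a.txt', 'b.txt', 'a.txt'])
    return deps.list  # ['a.txt', 'b.txt']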
| {
"repo_name": "PatrickKennedy/Sybil",
"path": "docutils/utils.py",
"copies": "2",
"size": "21280",
"license": "bsd-2-clause",
"hash": -3848641082463060000,
"line_mean": 34.8853288364,
"line_max": 79,
"alpha_frac": 0.6027255639,
"autogenerated": false,
"ratio": 4.310309904800486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5913035468700486,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import b
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced iff the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The encoding for stderr output.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.encoding = encoding
"""The character encoding for the stderr output."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if stream is None:
stream = sys.stderr
elif type(stream) in (str, unicode):
# Leave stream untouched if it's ''.
if stream != '':
if type(stream) == str:
stream = open(stream, 'w')
elif type(stream) == unicode:
stream = open(stream.encode(), 'w')
self.stream = stream
"""Where warning output is sent."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if stream is None:
stream = sys.stderr
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL):
msgtext = msg.astext().encode(self.encoding, self.error_handler)
self.stream.write(msgtext)
self.stream.write(b('\n'))
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
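# Illustrative doctest-style example:
# >>> assemble_option_dict([('width', '4')], {'width': int})
# {'width': 4}
# A second ('width', ...) pair would raise DuplicateOptionError, and an
# unknown option name raises KeyError.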
class NameValueError(DataError): pass
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
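# Illustrative doctest-style example:
# >>> extract_name_value('a=1 b="two words"')
# [('a', '1'), ('b', 'two words')]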
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default set will be used.
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
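# Illustrative example: relative_path('doc/index.txt', 'doc/img/logo.png')
# returns 'img/logo.png'; if the two paths share no common prefix, the
# absolute '/'-joined path to the target is returned instead.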
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to is None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
return settings.stylesheet_path.split(",")
elif settings.stylesheet:
return settings.stylesheet.split(",")
else:
return []
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
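# Illustrative example: with trim_footnote_reference_space left at None and
# settings.footnote_references == 'superscript', this returns True; an
# explicit True/False setting always takes precedence.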
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (should be wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
if isinstance(text, unicode):
total = 0
for c in text:
total += east_asian_widths[unicodedata.east_asian_width(c)]
return total
else:
return len(text)
if hasattr(unicodedata, 'east_asian_width'):
column_width = east_asian_column_width
else:
column_width = len
def uniq(L):
r = []
for item in L:
if item not in r:
r.append(item)
return r
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, *filenames):
"""
For each dependency in `filenames` that has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if filename not in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
# Guard against closing sys.stdout when output was directed there.
if self.file is not sys.stdout:
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| {
"repo_name": "spreeker/democracygame",
"path": "external_apps/docutils-snapshot/build/lib/docutils/utils.py",
"copies": "2",
"size": "22277",
"license": "bsd-3-clause",
"hash": -7065284704467101000,
"line_mean": 34.8727858293,
"line_max": 79,
"alpha_frac": 0.6051981865,
"autogenerated": false,
"ratio": 4.3105650154798765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5915763201979877,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import b
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced iff the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding=None, error_handler='replace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The output encoding.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if stream is None:
stream = sys.stderr
elif type(stream) in (str, unicode):
# Leave stream untouched if it's ''.
if stream != '':
if type(stream) == str:
stream = open(stream, 'w')
elif type(stream) == unicode:
stream = open(stream.encode(), 'w')
self.stream = stream
"""Where warning output is sent."""
if encoding is None:
try:
encoding = stream.encoding
except AttributeError:
pass
self.encoding = encoding or 'ascii'
"""The output character encoding."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if stream is None:
stream = sys.stderr
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL
or level >= self.halt_level):
msgtext = msg.astext().encode(self.encoding, self.error_handler)
self.stream.write(msgtext)
self.stream.write(b('\n'))
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
class NameValueError(DataError): pass
def decode_path(path):
"""
Decode file/path string. Return `nodes.reprunicode` object.
Provides a conversion to unicode without the UnicodeDecode error of the
implicit 'ascii:strict' decoding.
"""
# see also http://article.gmane.org/gmane.text.docutils.user/2905
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except UnicodeDecodeError:
path = path.decode('utf-8', 'strict')
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
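# Illustrative example (assumes a UTF-8 filesystem encoding):
# decode_path('caf\xc3\xa9') returns u'caf\xe9'; input that fails to decode
# falls back to UTF-8 and finally to ASCII with replacement characters.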
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default set will be used.
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
source_path = decode_path(source_path)
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
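# Illustrative example ('pattern' and 'substitution' are hypothetical):
# with pattern = re.compile(r'\$Date[^$]*\$') and substitution = 'Date',
# clean_rcs_keywords rewrites an RCS "$Date: ... $" keyword inside a
# single-Text paragraph in place.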
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to is None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
return settings.stylesheet_path.split(",")
elif settings.stylesheet:
return settings.stylesheet.split(",")
else:
return []
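# Illustrative example: with settings.stylesheet_path == 'a.css,b.css' this
# returns ['a.css', 'b.css']; note that this version does not strip
# whitespace around the commas.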
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
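# Illustrative example: escape2null('a\\*b') returns 'a\x00*b', and
# unescape('a\x00*b') returns 'a*b'; passing restore_backslashes=1 to
# unescape turns the nulls back into backslashes instead.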
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (should be wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
if isinstance(text, unicode):
total = 0
for c in text:
total += east_asian_widths[unicodedata.east_asian_width(c)]
return total
else:
return len(text)
if hasattr(unicodedata, 'east_asian_width'):
column_width = east_asian_column_width
else:
column_width = len
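# Illustrative example: under a unicode-aware interpreter,
# column_width(u'\u30c6\u30b9\u30c8') == 6, since each of the three katakana
# characters maps to east_asian_widths['W'] == 2.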
def uniq(L):
r = []
for item in L:
if item not in r:
r.append(item)
return r
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, *filenames):
"""
For each dependency in `filenames` that has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if filename not in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
# Guard against closing sys.stdout when output was directed there.
if self.file is not sys.stdout:
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| {
"repo_name": "rimbalinux/LMD3",
"path": "docutils/utils.py",
"copies": "2",
"size": "23796",
"license": "bsd-3-clause",
"hash": 7530035242062554000,
"line_mean": 34.7222222222,
"line_max": 79,
"alpha_frac": 0.5884182215,
"autogenerated": false,
"ratio": 4.401775804661487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5990194026161487,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import unicodedata
import warnings
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import bytes
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced iff the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding=None, error_handler='backslashreplace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The output encoding.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if stream is None:
stream = sys.stderr
elif stream and type(stream) in (unicode, bytes):
# if `stream` is a file name, open it
if type(stream) is bytes:
stream = open(stream, 'w')
else:
stream = open(stream.encode(), 'w')
self.stream = stream
"""Where warning output is sent."""
if encoding is None:
try:
encoding = stream.encoding
except AttributeError:
pass
self.encoding = encoding or 'ascii'
"""The output character encoding."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if stream is None:
stream = sys.stderr
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
# assert source is not None, "node has line- but no source-argument"
if not 'source' in attributes: # 'line' is absolute line number
try: # look up (source, line-in-source)
source, line = self.locator(attributes.get('line'))
# print "locator lookup", kwargs.get('line'), "->", source, line
except AttributeError:
source, line = None, None
if source is not None:
attributes['source'] = source
if line is not None:
attributes['line'] = line
# assert attributes['line'] is not None, (message, kwargs)
# assert attributes['source'] is not None, (message, kwargs)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL
or level >= self.halt_level):
msgtext = msg.astext() + '\n'
try:
self.stream.write(msgtext)
except UnicodeEncodeError:
self.stream.write(msgtext.encode(self.encoding,
self.error_handler))
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
class NameValueError(DataError): pass
def decode_path(path):
"""
Decode file/path string. Return `nodes.reprunicode` object.
Convert to Unicode without the UnicodeDecode error of the
implicit 'ascii:strict' decoding.
"""
# see also http://article.gmane.org/gmane.text.docutils.user/2905
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except AttributeError: # default value None has no decode method
return nodes.reprunicode(path)
except UnicodeDecodeError:
try:
path = path.decode('utf-8', 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default core set will
be used. If you will use the document object with any Docutils
components, you must provide their default settings as well. For
example, if parsing, at least provide the parser settings,
obtainable as follows::
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
).get_default_values()
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
source_path = decode_path(source_path)
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
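# Illustrative example (mirrors the docstring above): building a document
# suitable for use with the reStructuredText parser.
# from docutils.frontend import OptionParser
# from docutils.parsers.rst import Parser
# settings = OptionParser(components=(Parser,)).get_default_values()
# document = new_document('<snippet>', settings)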
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to is None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
assert not (settings.stylesheet and settings.stylesheet_path), (
'stylesheet and stylesheet_path are mutually exclusive.')
if settings.stylesheet_path:
sheets = settings.stylesheet_path.split(",")
elif settings.stylesheet:
sheets = settings.stylesheet.split(",")
else:
sheets = []
# strip whitespace (frequently occurring in config files)
return [sheet.strip(u' \t\n\r') for sheet in sheets]
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (should be wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
if isinstance(text, unicode):
total = 0
for c in text:
total += east_asian_widths[unicodedata.east_asian_width(c)]
return total
else:
return len(text)
if hasattr(unicodedata, 'east_asian_width'):
column_width = east_asian_column_width
else:
column_width = len
def uniq(L):
r = []
for item in L:
if item not in r:
r.append(item)
return r
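# Illustrative example: uniq([3, 1, 3, 2, 1]) returns [3, 1, 2], preserving
# first-seen order.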
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, *filenames):
"""
For each dependency in `filenames` that has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if filename not in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
# Guard against closing sys.stdout when output was directed there.
if self.file is not sys.stdout:
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
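# Illustrative example ('deps.txt' is a hypothetical file name):
# deps = DependencyList('deps.txt')
# deps.add('spam.rst', 'eggs.rst', 'spam.rst')  # the duplicate is ignored
# deps.close()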
| {
"repo_name": "Soya93/Extract-Refactoring",
"path": "python/helpers/py2only/docutils/utils.py",
"copies": "5",
"size": "24559",
"license": "apache-2.0",
"hash": -8878203385654736000,
"line_mean": 35.3298816568,
"line_max": 84,
"alpha_frac": 0.6036076387,
"autogenerated": false,
"ratio": 4.341347003712215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7444954642412215,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import bytes
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced iff the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding=None, error_handler='backslashreplace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The output encoding.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if stream is None:
stream = sys.stderr
elif stream and type(stream) in (unicode, bytes):
# if `stream` is a file name, open it
if type(stream) is bytes:
stream = open(stream, 'w')
else:
stream = open(stream.encode(), 'w')
self.stream = stream
"""Where warning output is sent."""
if encoding is None:
try:
encoding = stream.encoding
except AttributeError:
pass
self.encoding = encoding or 'ascii'
"""The output character encoding."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if stream is None:
stream = sys.stderr
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
# assert source is not None, "node has line- but no source-argument"
if not 'source' in attributes: # 'line' is absolute line number
try: # look up (source, line-in-source)
source, line = self.locator(attributes.get('line'))
# print "locator lookup", kwargs.get('line'), "->", source, line
except AttributeError:
source, line = None, None
if source is not None:
attributes['source'] = source
if line is not None:
attributes['line'] = line
# assert attributes['line'] is not None, (message, kwargs)
# assert attributes['source'] is not None, (message, kwargs)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL
or level >= self.halt_level):
msgtext = msg.astext() + '\n'
try:
self.stream.write(msgtext)
except UnicodeEncodeError:
self.stream.write(msgtext.encode(self.encoding,
self.error_handler))
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
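# Hedged example (editor's illustration): a minimal options_spec maps option
# names to converter callables, so assembling converts the raw strings:
#
#     spec = {'indent': int, 'label': str}
#     assemble_option_dict([('indent', '4'), ('label', 'x')], spec)
#     # -> {'indent': 4, 'label': 'x'}  (unknown names raise KeyError)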
class NameValueError(DataError): pass
def decode_path(path):
"""
Decode file/path string. Return `nodes.reprunicode` object.
Convert to Unicode without raising UnicodeDecodeError from the
implicit 'ascii:strict' decoding.
"""
# see also http://article.gmane.org/gmane.text.docutils.user/2905
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except AttributeError: # default value None has no decode method
return nodes.reprunicode(path)
except UnicodeDecodeError:
try:
path = path.decode('utf-8', 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
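# Hedged example (editor's illustration) of the attribute-list syntax the
# parser above accepts:
#
#     extract_name_value('a=1 b="two words"')
#     # -> [('a', '1'), ('b', 'two words')]
#     extract_name_value('a=')    # raises NameValueError (missing value)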
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default core set will
be used. If you will use the document object with any Docutils
components, you must provide their default settings as well. For
example, if parsing, at least provide the parser settings,
obtainable as follows::
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
).get_default_values()
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
source_path = decode_path(source_path)
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
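# Hedged example (editor's illustration), assuming both paths are resolved
# against the same working directory:
#
#     relative_path('dir/sub/page.txt', 'dir/style.css')
#     # -> '../style.css'
#     relative_path('dir/page.txt', 'dir/sub/style.css')
#     # -> 'sub/style.css'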
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to is None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
assert not (settings.stylesheet and settings.stylesheet_path), (
'stylesheet and stylesheet_path are mutually exclusive.')
if settings.stylesheet_path:
sheets = settings.stylesheet_path.split(",")
elif settings.stylesheet:
sheets = settings.stylesheet.split(",")
else:
sheets = []
# strip whitespace (frequently occurring in config files)
return [sheet.strip(u' \t\n\r') for sheet in sheets]
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
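# Hedged example (editor's illustration): escape-backslashes become NUL
# markers, which unescape() later drops or restores:
#
#     escape2null('rst\\*text')            # -> 'rst\x00*text'
#     unescape('rst\x00*text')             # -> 'rst*text'
#     unescape('rst\x00*text', restore_backslashes=1)  # -> 'rst\\*text'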
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (s/b wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
if isinstance(text, unicode):
total = 0
for c in text:
total += east_asian_widths[unicodedata.east_asian_width(c)]
return total
else:
return len(text)
if hasattr(unicodedata, 'east_asian_width'):
column_width = east_asian_column_width
else:
column_width = len
def uniq(L):
r = []
for item in L:
if not item in r:
r.append(item)
return r
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, *filenames):
"""
If the dependency `filename` has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if not filename in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| {
"repo_name": "alphafoobar/intellij-community",
"path": "python/helpers/docutils/utils.py",
"copies": "40",
"size": "24558",
"license": "apache-2.0",
"hash": 2989172734709786600,
"line_mean": 35.3822222222,
"line_max": 84,
"alpha_frac": 0.6036322176,
"autogenerated": false,
"ratio": 4.341937765205092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils.error_reporting import ErrorOutput, SafeString
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced if the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding=None, error_handler='backslashreplace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string) or `False` (for discarding all stream messages)
or `None` (implies `sys.stderr`; default).
- `encoding`: The output encoding.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if not isinstance(stream, ErrorOutput):
stream = ErrorOutput(stream, encoding, error_handler)
self.stream = stream
"""Where warning output is sent."""
self.encoding = encoding or getattr(stream, 'encoding', 'ascii')
"""The output character encoding."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if not isinstance(stream, ErrorOutput):
stream = ErrorOutput(stream, self.encoding, self.error_handler)
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
# `message` can be a `string`, `unicode`, or `Exception` instance.
if isinstance(message, Exception):
message = SafeString(message)
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
# assert source is not None, "node has line- but no source-argument"
if not 'source' in attributes: # 'line' is absolute line number
try: # look up (source, line-in-source)
source, line = self.locator(attributes.get('line'))
# print "locator lookup", kwargs.get('line'), "->", source, line
except AttributeError:
source, line = None, None
if source is not None:
attributes['source'] = source
if line is not None:
attributes['line'] = line
# assert attributes['line'] is not None, (message, kwargs)
# assert attributes['source'] is not None, (message, kwargs)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL
or level >= self.halt_level):
self.stream.write(msg.astext() + '\n')
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
class NameValueError(DataError): pass
def decode_path(path):
"""
Ensure `path` is Unicode. Return `nodes.reprunicode` object.
Decode file/path string in a failsafe manner if not already done.
"""
# see also http://article.gmane.org/gmane.text.docutils.user/2905
if isinstance(path, unicode):
return path
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except AttributeError: # default value None has no decode method
return nodes.reprunicode(path)
except UnicodeDecodeError:
try:
path = path.decode('utf-8', 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default core set will
be used. If you will use the document object with any Docutils
components, you must provide their default settings as well. For
example, if parsing, at least provide the parser settings,
obtainable as follows::
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
).get_default_values()
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
source_path = decode_path(source_path)
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to is None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
assert not (settings.stylesheet and settings.stylesheet_path), (
'stylesheet and stylesheet_path are mutually exclusive.')
if settings.stylesheet_path:
sheets = settings.stylesheet_path.split(",")
elif settings.stylesheet:
sheets = settings.stylesheet.split(",")
else:
sheets = []
# strip whitespace (frequently occurring in config files)
return [sheet.strip(u' \t\n') for sheet in sheets]
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (s/b wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_widt()` to character
column widths."""
def column_width(text):
"""Return the column width of text.
Correct ``len(text)`` for wide East Asian and combining Unicode chars.
"""
if isinstance(text, str) and sys.version_info < (3,0):
return len(text)
combining_correction = sum([-1 for c in text
if unicodedata.combining(c)])
try:
width = sum([east_asian_widths[unicodedata.east_asian_width(c)]
for c in text])
except AttributeError: # east_asian_width() New in version 2.4.
width = len(text)
return width + combining_correction
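# Hedged example (editor's illustration): wide East Asian characters count
# as two columns and combining marks as zero:
#
#     column_width(u'abc')                  # -> 3
#     column_width(u'\u65e5\u672c\u8a9e')   # -> 6 (three wide characters)
#     column_width(u'e\u0301')              # -> 1 ('e' plus combining acute)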
def uniq(L):
r = []
for item in L:
if not item in r:
r.append(item)
return r
# by Li Daobing http://code.activestate.com/recipes/190465/
# since Python 2.6 there is also itertools.combinations()
def unique_combinations(items, n):
"""Return r-length tuples, in sorted order, no repeated elements"""
if n==0: yield []
else:
for i in xrange(len(items)-n+1):
for cc in unique_combinations(items[i+1:],n-1):
yield [items[i]]+cc
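# Hedged example (editor's illustration):
#
#     list(unique_combinations([1, 2, 3], 2))
#     # -> [[1, 2], [1, 3], [2, 3]]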
def normalize_language_tag(tag):
"""Return a list of normalized combinations for a `BCP 47` language tag.
Example:
>>> normalize_language_tag('de-AT-1901')
['de_at_1901', 'de_at', 'de_1901', 'de']
"""
# normalize:
tag = tag.lower().replace('-','_')
# find all combinations of subtags
taglist = []
base_tag= tag.split('_')[:1]
subtags = tag.split('_')[1:]
# print base_tag, subtags
for n in range(len(subtags), 0, -1):
for tags in unique_combinations(subtags, n):
# print tags
taglist.append('_'.join(base_tag + tags))
taglist += base_tag
return taglist
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, *filenames):
"""
If the dependency `filename` has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if not filename in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
if self.file not in (sys.stdout, sys.stderr):
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| {
"repo_name": "cuongthai/cuongthai-s-blog",
"path": "docutils/utils.py",
"copies": "2",
"size": "26343",
"license": "bsd-3-clause",
"hash": 5664519041428383000,
"line_mean": 35.7948350072,
"line_max": 84,
"alpha_frac": 0.591428463,
"autogenerated": false,
"ratio": 4.364314115308151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5955742578308151,
"avg_score": null,
"num_lines": null
} |
"""$Id: validators.py 749 2007-04-02 15:45:49Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 749 $"
__date__ = "$Date: 2007-04-02 15:45:49 +0000 (Mon, 02 Apr 2007) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
import re, time, datetime
from uri import canonicalForm, urljoin
from rfc822 import AddressList, parsedate
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#
# Valid mime type
#
mime_re = re.compile('[^\s()<>,;:\\"/[\]?=]+/[^\s()<>,;:\\"/[\]?=]+(\s*;\s*[^\s()<>,;:\\"/[\]?=]+=("(\\"|[^"])*"|[^\s()<>,;:\\"/[\]?=]+))*$')
#
# Extensibility hook: logic varies based on type of feed
#
def any(self, name, qname, attrs):
if self.getFeedType() != TYPE_RSS1:
return eater()
else:
from rdf import rdfExtension
return rdfExtension(qname)
#
# This class simply eats events. Useful to prevent cascading of errors
#
class eater(validatorBase):
def getExpectedAttrNames(self):
return self.attrs.getNames()
def characters(self, string):
for c in string:
if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
from validators import BadCharacters
self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
def startElementNS(self, name, qname, attrs):
# RSS 2.0 arbitrary restriction on extensions
feedtype=self.getFeedType()
if (not qname) and feedtype and (feedtype==TYPE_RSS2) and self.name.find('_')>=0:
from logging import NotInANamespace
self.log(NotInANamespace({"parent":self.name, "element":name, "namespace":'""'}))
# ensure element is "namespace well formed"
if name.find(':') != -1:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":name}))
# ensure all attribute namespaces are properly defined
for (namespace,attr) in attrs.keys():
if ':' in attr and not namespace:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":attr}))
for c in attrs.get((namespace,attr)):
if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
from validators import BadCharacters
self.log(BadCharacters({"parent":name, "element":attr}))
# eat children
self.push(eater(), name, attrs)
from HTMLParser import HTMLParser, HTMLParseError
class HTMLValidator(HTMLParser):
htmltags = [
"a", "abbr", "acronym", "address", "applet", "area", "b", "base",
"basefont", "bdo", "big", "blockquote", "body", "br", "button", "caption",
"center", "cite", "code", "col", "colgroup", "dd", "del", "dir", "div",
"dfn", "dl", "dt", "em", "fieldset", "font", "form", "frame", "frameset",
"h1", "h2", "h3", "h4", "h5", "h6",
"head", "hr", "html", "i", "iframe", "img", "input", "ins",
"isindex", "kbd", "label", "legend", "li", "link", "map", "menu", "meta",
"noframes", "noscript", "object", "ol", "optgroup", "option", "p",
"param", "pre", "q", "s", "samp", "script", "select", "small", "span",
"strike", "strong", "style", "sub", "sup", "table", "tbody", "td",
"textarea", "tfoot", "th", "thead", "title", "tr", "tt", "u", "ul",
"var", "xmp", "plaintext", "embed", "comment", "listing"]
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img',
'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol',
'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small',
'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td',
'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var',
'noscript']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'bgcolor', 'border', 'cellpadding',
'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
'dir', 'disabled', 'enctype', 'face', 'for', 'frame', 'headers', 'height',
'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', 'longdesc',
'maxlength', 'media', 'method', 'multiple', 'name', 'nohref', 'noshade',
'nowrap', 'prompt', 'readonly', 'rel', 'rev', 'rows', 'rowspan', 'rules',
'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'summary',
'tabindex', 'target', 'title', 'type', 'usemap', 'valign', 'value',
'vspace', 'width', 'xml:lang', 'xmlns']
acceptable_css_properties = ['azimuth', 'background', 'background-color',
'border', 'border-bottom', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-collapse', 'border-color', 'border-left',
'border-left-color', 'border-left-style', 'border-left-width',
'border-right', 'border-right-color', 'border-right-style',
'border-right-width', 'border-spacing', 'border-style', 'border-top',
'border-top-color', 'border-top-style', 'border-top-width', 'border-width',
'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float',
'font', 'font-family', 'font-size', 'font-style', 'font-variant',
'font-weight', 'height', 'letter-spacing', 'line-height', 'margin',
'margin-bottom', 'margin-left', 'margin-right', 'margin-top', 'overflow',
'padding', 'padding-bottom', 'padding-left', 'padding-right',
'padding-top', 'pause', 'pause-after', 'pause-before', 'pitch',
'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral',
'speak-punctuation', 'speech-rate', 'stress', 'text-align',
'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align',
'voice-family', 'volume', 'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['aqua', 'auto', 'black', 'block', 'blue', 'bold',
'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted',
'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime',
'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d?\.?\d?\d(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
def log(self,msg):
offset = [self.element.line + self.getpos()[0] - 1 -
self.element.dispatcher.locator.getLineNumber(),
-self.element.dispatcher.locator.getColumnNumber()]
self.element.log(msg, offset)
def __init__(self,value,element):
self.element=element
self.valid = True
HTMLParser.__init__(self)
if value.lower().find('<?import ') >= 0:
self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":"?import"}))
try:
self.feed(value)
self.close()
if self.valid:
self.log(ValidHtml({"parent":self.element.parent.name, "element":self.element.name}))
except HTMLParseError, msg:
element = self.element
offset = [element.line - element.dispatcher.locator.getLineNumber(),
- element.dispatcher.locator.getColumnNumber()]
match = re.search(', at line (\d+), column (\d+)',str(msg))
if match: offset[0] += int(match.group(1))-1
element.log(NotHtml({"parent":element.parent.name, "element":element.name, "value": str(msg)}),offset)
def handle_starttag(self, tag, attributes):
if tag.lower() not in self.htmltags:
self.log(NotHtml({"parent":self.element.parent.name, "element":self.element.name,"value":tag, "message": "Non-html tag"}))
self.valid = False
elif tag.lower() not in HTMLValidator.acceptable_elements:
self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":tag}))
for (name,value) in attributes:
if name.lower() == 'style':
for evil in checkStyle(value):
self.log(DangerousStyleAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":"style", "value":evil}))
elif name.lower() not in self.acceptable_attributes:
self.log(SecurityRiskAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":name}))
def handle_charref(self, name):
if name.startswith('x'):
value = int(name[1:],16)
else:
value = int(name)
if 0x80 <= value <= 0x9F or value == 0xfffd:
self.log(BadCharacters({"parent":self.element.parent.name,
"element":self.element.name, "value":"&#" + name + ";"}))
#
# Scrub CSS properties for potentially evil intent
#
def checkStyle(style):
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return [style]
if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style):
return [style]
unsafe = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style.lower()):
if prop not in HTMLValidator.acceptable_css_properties:
if prop not in unsafe: unsafe.append(prop)
elif prop.split('-')[0] in ['background','border','margin','padding']:
for keyword in value.split():
if keyword not in HTMLValidator.acceptable_css_keywords and \
not HTMLValidator.valid_css_values.match(keyword):
if keyword not in unsafe: unsafe.append(keyword)
return unsafe
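# Hedged examples (editor's illustration): an acceptable declaration yields
# no findings; an unknown property is reported; an unparseable style is
# returned whole:
#
#     checkStyle('color: blue')               # -> []
#     checkStyle('position: absolute')        # -> ['position']
#     checkStyle('behavior: url(evil.htc)')   # -> ['behavior: url(evil.htc)']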
#
# This class simply eats html events, identifying unsafe ones
#
class htmlEater(validatorBase):
def getExpectedAttrNames(self):
if self.attrs and len(self.attrs):
return self.attrs.getNames()
def textOK(self): pass
def startElementNS(self, name, qname, attrs):
for attr in attrs.getNames():
if attr[0]==None:
if attr[1].lower() == 'style':
for value in checkStyle(attrs.get(attr)):
self.log(DangerousStyleAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1], "value":value}))
elif attr[1].lower() not in HTMLValidator.acceptable_attributes:
self.log(SecurityRiskAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1]}))
self.push(htmlEater(), self.name, attrs)
if name.lower() not in HTMLValidator.acceptable_elements:
self.log(SecurityRisk({"parent":self.parent.name, "element":self.name, "tag":name}))
def endElementNS(self,name,qname):
pass
#
# text: i.e., no child elements allowed (except rdf:Description).
#
class text(validatorBase):
def textOK(self): pass
def getExpectedAttrNames(self):
if self.getFeedType() == TYPE_RSS1:
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType'),
(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'datatype'),
(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource')]
else:
return []
def startElementNS(self, name, qname, attrs):
if self.getFeedType() == TYPE_RSS1:
if self.value.strip() or self.children:
if self.attrs.get((u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')) != 'Literal':
self.log(InvalidRDF({"message":"mixed content"}))
from rdf import rdfExtension
self.push(rdfExtension(qname), name, attrs)
else:
from base import namespaces
ns = namespaces.get(qname, '')
if name.find(':') != -1:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":name}))
else:
self.log(UndefinedElement({"parent":self.name, "element":name}))
self.push(eater(), name, attrs)
#
# noduplicates: no child elements, no duplicate siblings
#
class noduplicates(validatorBase):
def __init__(self, message=DuplicateElement):
self.message=message
validatorBase.__init__(self)
def startElementNS(self, name, qname, attrs):
pass
def characters(self, string):
pass
def prevalidate(self):
if self.name in self.parent.children:
self.log(self.message({"parent":self.parent.name, "element":self.name}))
#
# valid e-mail addr-spec
#
class addr_spec(text):
email_re = re.compile('''([a-zA-Z0-9_\-\+\.\']+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$''')
message = InvalidAddrSpec
def validate(self, value=None):
if not value: value=self.value
if not self.email_re.match(value):
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ValidContact({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# iso639 language code
#
def iso639_validate(log,value,element,parent):
import iso639codes
if '-' in value:
lang, sublang = value.split('-', 1)
else:
lang = value
if not iso639codes.isoLang.has_key(unicode.lower(unicode(lang))):
log(InvalidLanguage({"parent":parent, "element":element, "value":value}))
else:
log(ValidLanguage({"parent":parent, "element":element}))
class iso639(text):
def validate(self):
iso639_validate(self.log, self.value, self.name, self.parent.name)
#
# Encoding charset
#
class Charset(text):
def validate(self):
try:
import codecs
codecs.lookup(self.value)
except:
self.log(InvalidEncoding({'value': self.value}))
#
# Mime type
#
class MimeType(text):
def validate(self):
if not mime_re.match(self.value):
self.log(InvalidMIMEType({'attr':'type'}))
#
# iso8601 dateTime
#
class iso8601(text):
iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
"(Z|([+-]\d\d:\d\d))?)?)?)?$")
message = InvalidISO8601DateTime
def validate(self):
if not self.iso8601_re.match(self.value):
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
work=self.value.split('T')
date=work[0].split('-')
year=int(date[0])
if len(date)>1:
month=int(date[1])
try:
if len(date)>2: datetime.date(year,month,int(date[2]))
except ValueError, e:
return self.log(self.message({"parent":self.parent.name, "element":self.name, "value":str(e)}))
if len(work) > 1:
time=work[1].split('Z')[0].split('+')[0].split('-')[0]
time=time.split(':')
if int(time[0])>23:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
if len(time)>1 and int(time[1])>60:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
if len(time)>2 and float(time[2])>60.0:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
self.log(ValidW3CDTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
return 1
class w3cdtf(iso8601):
# The same as in iso8601, except a timezone is not optional when
# a time is present
iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
"(Z|([+-]\d\d:\d\d)))?)?)?$")
message = InvalidW3CDTFDate
class rfc3339(iso8601):
# The same as in iso8601, except that the only thing that is optional
# is the seconds
iso8601_re = re.compile("^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d*)?" +
"(Z|([+-]\d\d:\d\d))$")
message = InvalidRFC3339Date
def validate(self):
if iso8601.validate(self):
tomorrow=time.strftime("%Y-%m-%dT%H:%M:%SZ",time.localtime(time.time()+86400))
if self.value > tomorrow or self.value < "1970":
self.log(ImplausibleDate({"parent":self.parent.name,
"element":self.name, "value":self.value}))
return 0
return 1
return 0
class iso8601_date(iso8601):
date_re = re.compile("^\d\d\d\d-\d\d-\d\d$")
def validate(self):
if iso8601.validate(self):
if not self.date_re.search(self.value):
self.log(InvalidISO8601Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
iana_schemes = [ # http://www.iana.org/assignments/uri-schemes.html
"ftp", "http", "gopher", "mailto", "news", "nntp", "telnet", "wais",
"file", "prospero", "z39.50s", "z39.50r", "cid", "mid", "vemmi",
"service", "imap", "nfs", "acap", "rtsp", "tip", "pop", "data", "dav",
"opaquelocktoken", "sip", "sips", "tel", "fax", "modem", "ldap",
"https", "soap.beep", "soap.beeps", "xmlrpc.beep", "xmlrpc.beeps",
"urn", "go", "h323", "ipp", "tftp", "mupdate", "pres", "im", "mtqp",
"iris.beep", "dict", "snmp", "crid", "tag", "dns", "info"
]
#
# rfc2396 fully qualified (non-relative) uri
#
class rfc2396(text):
rfc2396_re = re.compile("([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?/{0,2}" +
"[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]*$")
urn_re = re.compile(r"^[Uu][Rr][Nn]:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*(#[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*)?$")
def validate(self, errorClass=InvalidLink, successClass=ValidURI, extraParams={}):
success = 0
scheme=self.value.split(':')[0].lower()
if scheme=='tag':
if self.tag_re.match(self.value):
success = 1
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(ValidTAG(logparams))
else:
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(InvalidTAG(logparams))
elif scheme=="urn":
if self.urn_re.match(self.value):
success = 1
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(ValidURN(logparams))
else:
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(InvalidURN(logparams))
elif not self.rfc2396_re.match(self.value):
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
urichars_re=re.compile("[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]")
for c in self.value:
if ord(c)<128 and not urichars_re.match(c):
logparams['value'] = repr(str(c))
self.log(InvalidUriChar(logparams))
break
else:
try:
if self.rfc2396_re.match(self.value.encode('idna')):
errorClass=UriNotIri
except:
pass
self.log(errorClass(logparams))
elif scheme in ['http','ftp']:
if not re.match('^\w+://[^/].*',self.value):
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(errorClass(logparams))
else:
success = 1
elif self.value.find(':')>=0 and scheme.isalpha() and scheme not in iana_schemes:
self.log(SchemeNotIANARegistered({"parent":self.parent.name, "element":self.name, "value":scheme}))
else:
success = 1
if success:
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(successClass(logparams))
return success
#
# rfc3987 iri
#
class rfc3987(rfc2396):
def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
try:
if self.value: self.value = self.value.encode('idna')
except:
pass # apparently '.' produces label too long
return rfc2396.validate(self, errorClass, successClass, extraParams)
class rfc2396_full(rfc2396):
rfc2396_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
"[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
return rfc2396.validate(self, errorClass, successClass, extraParams)
#
# URI reference resolvable relative to xml:base
#
class xmlbase(rfc3987):
def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
if rfc3987.validate(self, errorClass, successClass, extraParams):
if self.dispatcher.xmlBase != self.xmlBase:
docbase=canonicalForm(self.dispatcher.xmlBase).split('#')[0]
elembase=canonicalForm(self.xmlBase).split('#')[0]
value=canonicalForm(urljoin(elembase,self.value)).split('#')[0]
if (value==elembase) and (elembase.encode('idna')!=docbase):
self.log(SameDocumentReference({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# rfc822 dateTime (+Y2K extension)
#
class rfc822(text):
rfc822_re = re.compile("(((mon)|(tue)|(wed)|(thu)|(fri)|(sat)|(sun))\s*,\s*)?" +
"\d\d?\s+((jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|(aug)|(sep)|(oct)|" +
"(nov)|(dec))\s+\d\d(\d\d)?\s+\d\d:\d\d(:\d\d)?\s+(([+-]\d\d\d\d)|" +
"(ut)|(gmt)|(est)|(edt)|(cst)|(cdt)|(mst)|(mdt)|(pst)|(pdt)|[a-ik-z])?$",
re.UNICODE)
rfc2822_re = re.compile("(((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun)), )?" +
"\d\d? ((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|" +
"(Nov)|(Dec)) \d\d\d\d \d\d:\d\d(:\d\d)? (([+-]?\d\d[03]0)|" +
"(UT)|(GMT)|(EST)|(EDT)|(CST)|(CDT)|(MST)|(MDT)|(PST)|(PDT)|Z)$")
def validate(self):
if self.rfc2822_re.match(self.value):
import calendar
value = parsedate(self.value)
try:
if value[0] > 1900:
dow = datetime.date(*value[:3]).strftime("%a")
if self.value.find(',')>0 and dow.lower() != self.value[:3].lower():
self.log(IncorrectDOW({"parent":self.parent.name, "element":self.name, "value":self.value[:3]}))
return
except ValueError, e:
self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":str(e)}))
return
tomorrow=time.localtime(time.time()+86400)
if value > tomorrow or value[0] < 1970:
self.log(ImplausibleDate({"parent":self.parent.name,
"element":self.name, "value":self.value}))
else:
self.log(ValidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
value1,value2 = '', self.value
value2 = re.sub(r'[\\](.)','',value2)
while value1!=value2: value1,value2=value2,re.sub('\([^(]*?\)',' ',value2)
if not self.rfc822_re.match(value2.strip().lower()):
self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ProblematicalRFC822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# Decode html entityrefs
#
from htmlentitydefs import name2codepoint
def decodehtml(data):
chunks=re.split('&#?(\w+);',data)
for i in range(1,len(chunks),2):
if chunks[i].isdigit():
# print chunks[i]
chunks[i]=unichr(int(chunks[i]))
elif chunks[i] in name2codepoint:
chunks[i]=unichr(name2codepoint[chunks[i]])
else:
chunks[i]='&' + chunks[i] +';'
# print repr(chunks)
return u"".join(map(unicode,chunks))
#
# Scan HTML for relative URLs
#
class absUrlMixin:
anchor_re = re.compile('<a\s+href=(?:"(.*?)"|\'(.*?)\'|([\w-]+))\s*>', re.IGNORECASE)
img_re = re.compile('<img\s+[^>]*src=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
absref_re = re.compile("\w+:")
def validateAbsUrl(self,value):
refs = self.img_re.findall(self.value) + self.anchor_re.findall(self.value)
for ref in [reduce(lambda a,b: a or b, x) for x in refs]:
if not self.absref_re.match(decodehtml(ref)):
self.log(ContainsRelRef({"parent":self.parent.name, "element":self.name, "value": ref}))
#
# Scan HTML for 'devious' content
#
class safeHtmlMixin:
def validateSafe(self,value):
HTMLValidator(value, self)
class safeHtml(text, safeHtmlMixin, absUrlMixin):
def prevalidate(self):
self.children.append(True) # force warnings about "mixed" content
def validate(self):
self.validateSafe(self.value)
self.validateAbsUrl(self.value)
#
# Elements for which email addresses are discouraged
#
class nonemail(text):
email_re = re.compile("<" + addr_spec.email_re.pattern[:-1] + ">")
def validate(self):
if self.email_re.search(self.value):
self.log(ContainsEmail({"parent":self.parent.name, "element":self.name}))
#
# Elements for which html is discouraged; the relative-URL check is currently disabled
#
class nonhtml(text,safeHtmlMixin):#,absUrlMixin):
htmlEndTag_re = re.compile("</(\w+)>")
htmlEntity_re = re.compile("&(#?\w+);")
def prevalidate(self):
self.children.append(True) # force warnings about "mixed" content
def validate(self, message=ContainsHTML):
tags = [t for t in self.htmlEndTag_re.findall(self.value) if t.lower() in HTMLValidator.htmltags]
if tags:
self.log(message({"parent":self.parent.name, "element":self.name, "value":tags[0]}))
elif self.htmlEntity_re.search(self.value):
for value in self.htmlEntity_re.findall(self.value):
from htmlentitydefs import name2codepoint
if (value in name2codepoint or not value.isalpha()) and \
value not in self.dispatcher.literal_entities:
self.log(message({"parent":self.parent.name, "element":self.name, "value":'&'+value+';'}))
#
# valid e-mail addresses
#
class email(addr_spec,nonhtml):
message = InvalidContact
def validate(self):
value=self.value
list = AddressList(self.value)
if len(list)==1: value=list[0][1]
nonhtml.validate(self)
addr_spec.validate(self, value)
class nonNegativeInteger(text):
def validate(self):
try:
t = int(self.value)
if t < 0:
raise ValueError
else:
self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidNonNegativeInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class positiveInteger(text):
def validate(self):
if self.value == '': return
try:
t = int(self.value)
if t <= 0:
raise ValueError
else:
self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidPositiveInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class Integer(text):
def validate(self):
if self.value == '': return
try:
t = int(self.value)
self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class Float(text):
def validate(self, name=None):
if not re.match('\d+\.?\d*$', self.value):
self.log(InvalidFloat({"attr":name or self.name, "value":self.value}))
class percentType(text):
def validate(self):
try:
t = float(self.value)
if t < 0.0 or t > 100.0:
raise ValueError
else:
self.log(ValidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
class latitude(text):
def validate(self):
try:
lat = float(self.value)
if lat > 90 or lat < -90:
raise ValueError
else:
self.log(ValidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
class longitude(text):
def validate(self):
try:
lon = float(self.value)
if lon > 180 or lon < -180:
raise ValueError
else:
self.log(ValidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# mixin to validate URL in attribute
#
class httpURLMixin:
http_re = re.compile("http://", re.IGNORECASE)
def validateHttpURL(self, ns, attr):
value = self.attrs[(ns, attr)]
if not self.http_re.search(value):
self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
elif not rfc2396_full.rfc2396_re.match(value):
self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
else:
self.log(ValidURLAttribute({"parent":self.parent.name, "element":self.name, "attr":attr}))
class rdfResourceURI(rfc2396):
def getExpectedAttrNames(self):
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
(u'http://purl.org/dc/elements/1.1/', u'title')]
def validate(self):
if (rdfNS, 'resource') in self.attrs.getNames():
self.value=self.attrs.getValue((rdfNS, 'resource'))
rfc2396.validate(self)
elif self.getFeedType() == TYPE_RSS1:
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:resource"}))
class rdfAbout(validatorBase):
def getExpectedAttrNames(self):
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
def startElementNS(self, name, qname, attrs):
pass
def validate(self):
if (rdfNS, 'about') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:about"}))
else:
test=rfc2396().setElement(self.name, self.attrs, self)
test.value=self.attrs.getValue((rdfNS, 'about'))
test.validate()
class nonblank(text):
def validate(self, errorClass=NotBlank, extraParams={}):
if not self.value:
logparams={"parent":self.parent.name,"element":self.name}
logparams.update(extraParams)
self.log(errorClass(logparams))
class nows(text):
def __init__(self):
self.ok = 1
text.__init__(self)
def characters(self, string):
text.characters(self, string)
if self.ok and (self.value != self.value.strip()):
self.log(UnexpectedWhitespace({"parent":self.parent.name, "element":self.name}))
self.ok = 0
class unique(nonblank):
def __init__(self, name, scope, message=DuplicateValue):
self.name=name
self.scope=scope
self.message=message
nonblank.__init__(self)
if not name+'s' in self.scope.__dict__:
self.scope.__dict__[name+'s']=[]
def validate(self):
nonblank.validate(self)
values=self.scope.__dict__[self.name+'s']
if self.value in values:
self.log(self.message({"parent":self.parent.name, "element":self.name,"value":self.value}))
elif self.value:
values.append(self.value)
class rfc3987_full(xmlbase):
rfc2396_re = rfc2396_full.rfc2396_re
def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
return rfc2396.validate(self, errorClass, successClass, extraParams)
class canonicaluri(rfc3987_full):
def validate(self):
prestrip = self.value
self.value = self.value.strip()
if rfc3987_full.validate(self):
c = canonicalForm(self.value)
if c is None or c != prestrip:
self.log(NonCanonicalURI({"parent":self.parent.name,"element":self.name,"uri":prestrip, "curi":c or 'N/A'}))
class yesno(text):
def normalizeWhitespace(self):
pass
def validate(self):
if not self.value.lower() in ['yes','no','clean']:
self.log(InvalidYesNo({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalse(text):
def normalizeWhitespace(self):
pass
def validate(self):
if not self.value.lower() in ['true','false']:
self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class duration(text):
duration_re = re.compile("^([0-9]?[0-9]:)?[0-5]?[0-9]:[0-5][0-9]$")
def validate(self):
if not self.duration_re.search(self.value):
self.log(InvalidDuration({"parent":self.parent.name, "element":self.name
, "value":self.value}))
class lengthLimitedText(nonhtml):
def __init__(self, max):
self.max = max
text.__init__(self)
def validate(self):
if len(self.value)>self.max:
self.log(TooLong({"parent":self.parent.name, "element":self.name,
"len": len(self.value), "max": self.max}))
nonhtml.validate(self)
class keywords(text):
def validate(self):
if self.value.find(' ')>=0 and self.value.find(',')<0:
self.log(InvalidKeywords({"parent":self.parent.name, "element":self.name}))
class commaSeparatedIntegers(text):
def validate(self):
if not re.match("^\d+(,\s*\d+)*$", self.value):
self.log(InvalidCommaSeparatedIntegers({"parent":self.parent.name,
"element":self.name}))
class formname(text):
def validate(self):
if not re.match("^[a-zA-z][a-zA-z0-9:._]*", self.value):
self.log(InvalidFormComponentName({"parent":self.parent.name,
"element":self.name, "value":self.value}))
class enumeration(text):
def validate(self):
if self.value not in self.valuelist:
self.log(self.error({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class caseinsensitive_enumeration(enumeration):
def validate(self):
self.value=self.value.lower()
enumeration.validate(self)
class iso3166(enumeration):
error = InvalidCountryCode
valuelist = [
"AD", "AE", "AF", "AG", "AI", "AM", "AN", "AO", "AQ", "AR", "AS", "AT",
"AU", "AW", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ",
"BM", "BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", "CA", "CC",
"CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU",
"CV", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO", "DZ", "EC", "EE",
"EG", "EH", "ER", "ES", "ET", "FI", "FJ", "FK", "FM", "FO", "FR", "GA",
"GB", "GD", "GE", "GF", "GH", "GI", "GL", "GM", "GN", "GP", "GQ", "GR",
"GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", "HT", "HU", "ID",
"IE", "IL", "IN", "IO", "IQ", "IR", "IS", "IT", "JM", "JO", "JP", "KE",
"KG", "KH", "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB",
"LC", "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD",
"MG", "MH", "MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT",
"MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI",
"NL", "NO", "NP", "NR", "NU", "NZ", "OM", "PA", "PE", "PF", "PG", "PH",
"PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", "PY", "QA", "RE", "RO",
"RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", "SK",
"SL", "SM", "SN", "SO", "SR", "ST", "SV", "SY", "SZ", "TC", "TD", "TF",
"TG", "TH", "TJ", "TK", "TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ",
"UA", "UG", "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN",
"VU", "WF", "WS", "YE", "YT", "ZA", "ZM", "ZW"]
class iso4217(enumeration):
error = InvalidCurrencyUnit
valuelist = [
"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZM",
"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV",
"BRL", "BSD", "BTN", "BWP", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF",
"CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CSD", "CUP", "CVE",
"CYP", "CZK", "DJF", "DKK", "DOP", "DZD", "EEK", "EGP", "ERN", "ETB",
"EUR", "FJD", "FKP", "GBP", "GEL", "GHC", "GIP", "GMD", "GNF", "GTQ",
"GWP", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR",
"IQD", "IRR", "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF",
"KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL",
"LTL", "LVL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP",
"MRO", "MTL", "MUR", "MWK", "MXN", "MXV", "MYR", "MZM", "NAD", "NGN",
"NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR",
"PLN", "PYG", "QAR", "ROL", "RON", "RUB", "RWF", "SAR", "SBD", "SCR",
"SDD", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "STD",
"SVC", "SYP", "SZL", "THB", "TJS", "TMM", "TND", "TOP", "TRL", "TRY",
"TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN", "USS", "UYU", "UZS",
"VEB", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC",
"XBD", "XCD", "XDR", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XTS",
"XXX", "YER", "ZAR", "ZMK", "ZWD"]
| {
"repo_name": "manderson23/NewsBlur",
"path": "vendor/feedvalidator/validators.py",
"copies": "16",
"size": "36588",
"license": "mit",
"hash": 5543148569904567000,
"line_mean": 40.3423728814,
"line_max": 168,
"alpha_frac": 0.6007160818,
"autogenerated": false,
"ratio": 2.9535033903777848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A proxy class for widgets, that need to be scrolled."""
from pygame import Rect
from Bin import Bin
from BaseWidget import BaseWidget
from StyleInformation import StyleInformation
import base
class ViewPort (Bin):
"""ViewPort (widget) -> ViewPort
A proxy class for widgets, which need scrolling abilities.
The ViewPort class allows widgets to be scrolled by virtually adding
horizontal and vertical offsets to the respective methods such as
rect_to_client().
Widgets which make use of the ViewPort class can easily scroll the
encapsulated child(ren) by modifying the 'hadjustment' and
'vadjustment' attributes of the ViewPort.
viewport.hadjustment = 10 # Add a 10px horizontal offset.
viewport.set_vadjustment (-15) # Reduce by a 15px vertical offset.
The ViewPort however does not provide advanced scrolling features
such as scrollbars. Instead it should be bound as a child to a widget,
which implements such features. Widgets, which make use of the
ViewPort's functionality can receive the absolute width and height
occupied by the ViewPorts child(ren) using the 'real_width' and
'real_height' attributes and their respective methods.
# Get the total width and height occupied by the ViewPort child.
abswidth = viewport.real_width
absheight = viewport.get_real_height ()
Concrete usage and inheritance implementations of the ViewPort can
be found in the ScrolledWindow, ListViewPort and ScrolledList
classes.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
hadjustment - The horizontal scrolling adjustment.
vadjustment - The vertical scrolling adjustment.
real_width - The real width occupied by the ViewPort.
real_height - The real height occupied by the ViewPort.
"""
def __init__ (self, widget):
Bin.__init__ (self)
self._hadjustment = 0
self._vadjustment = 0
if widget:
self.minsize = widget.size
else:
self.minsize = 10, 10
self.child = widget
def set_focus (self, focus=True):
"""V.set_focus (focus=True) -> None
Overrides the set_focus() behaviour for the ViewPort.
The ViewPort class is not focusable by default. It is a layout
class for other widgets, so it does not need to get the input
focus and thus it will return false without doing anything.
"""
return False
def set_hadjustment (self, value):
"""V.set_hadjustment (...) -> None
Sets the horizontal adjustment to scroll.
Raises a TypeError, if the passed argument is not an integer.
"""
if type (value) != int:
raise TypeError ("value must be an integer")
self._hadjustment = value
self.dirty = True
def set_vadjustment (self, value):
"""V.set_vadjustment (...) -> None
Sets the vertical adjustment to scroll.
Raises a TypeError, if the passed argument is not an integer.
"""
if type (value) != int:
raise TypeError ("value must be an integer")
self._vadjustment = value
self.dirty = True
def get_real_width (self):
"""V.get_real_width () -> int
Gets the real width occupied by the ViewPort.
"""
if self.child:
return self.child.width
return self.width
def get_real_height (self):
"""V.get_real_height () -> int
Gets the real height occupied by the ViewPort.
"""
if self.child:
return self.child.height
return self.height
def rect_to_client (self, rect=None):
"""V.rect_to_client () -> pygame.Rect
Returns the absolute coordinates a rect is located at.
If a rect argument is passed, its size and position are modified
to match the criteria of the scrolling adjustments of the
ViewPort. Besides that it exactly behaves like the original
rect_to_client() method.
"""
if rect:
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("VIEWPORT_BORDER"))
rect.x = self.x + rect.x
rect.y = self.y + rect.y
if rect.right > self.right - border:
rect.width = self.right - border - rect.left
if rect.bottom > self.bottom - border:
rect.height = self.bottom - border - rect.top
if (self.parent != None) and isinstance (self.parent, BaseWidget):
return self.parent.rect_to_client (rect)
return rect
return Bin.rect_to_client (self)
def draw_bg (self):
"""V.draw_bg () -> Surface
Draws the ViewPort background surface and returns it.
Creates the visible surface of the ViewPort and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_viewport (self)
def draw (self):
"""V.draw () -> None
Draws the ViewPort surface and places its child on it.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("VIEWPORT_BORDER"))
Bin.draw (self)
if self.child:
self.child.topleft = border + self.hadjustment, \
border + self.vadjustment
self.image.blit (self.child.image, (border, border),
(abs (self.hadjustment), abs (self.vadjustment),
self.width - 2 * border,
self.height - 2 * border))
def update (self, **kwargs):
"""V.update (...) -> None
Updates the ViewPort.
Updates the ViewPort and causes its parent to update itself on
demand.
"""
if not self.dirty:
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("VIEWPORT_BORDER"))
resize = kwargs.get ("resize", False)
children = kwargs.get ("children", {})
blit = self.image.blit
items = children.items ()
# Clean up the dirty areas on the widget.
vals = []
for child, rect in items:
blit (self._bg, rect, rect)
# r will be the area for the blit.
r = Rect (abs (self.hadjustment), abs (self.vadjustment),
self.width - 2 * border, self.height - 2 * border)
blit (child.image, (border, border), r)
vals.append (r)
# If a parent's available, reassign the child rects, so that
# they point to the absolute position on the widget and build
# one matching them all for an update.
if self.parent:
rect = Rect (self._oldrect)
if len (vals) != 0:
for r in vals:
r.x += self.x
r.y += self.y
# unionall() returns a new Rect; keep the result.
rect = rect.unionall (vals)
self.parent.update (children={ self : rect }, resize=resize)
self._lock = max (self._lock - 1, 0)
else:
Bin.update (self, **kwargs)
hadjustment = property (lambda self: self._hadjustment,
lambda self, var: self.set_hadjustment (var),
doc = "The horizontal scrolling adjustment.")
vadjustment = property (lambda self: self._vadjustment,
lambda self, var: self.set_vadjustment (var),
doc = "The vertical scrolling adjustment.")
real_width = property (lambda self: self.get_real_width (),
doc = "The real width occupied by the ViewPort.")
real_height = property (lambda self: self.get_real_height (),
doc = "The real height occupied by the ViewPort.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/ViewPort.py",
"copies": "1",
"size": "9380",
"license": "bsd-2-clause",
"hash": -1127505410490381600,
"line_mean": 37.4426229508,
"line_max": 79,
"alpha_frac": 0.6045842217,
"autogenerated": false,
"ratio": 4.451827242524917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5556411464224917,
"avg_score": null,
"num_lines": null
} |
# $Id: views.py 1d272b240620 2009/09/08 11:37:42 jpartogi $
from django.shortcuts import get_object_or_404
from django.views.generic import list_detail
from tagging.models import Tag, TaggedItem
from simple_blog.forms import *
from simple_blog.models import *
def entry_list(request, category_name=None, tag_name=None, queryset=None, paginate_by=None,
template_name=None, template_object_name=None):
if category_name != None:
category = get_object_or_404(Category, slug=category_name)
queryset = queryset.filter(category = category)
if tag_name != None:
tag = get_object_or_404(Tag,name=tag_name)
#queryset = TaggedItem.objects.get_by_model(Entry, tag) #TODO: this causes bug
queryset = queryset.filter(tag_list__contains = tag_name) #temporary fix?
queryset = queryset.order_by('posted') # order_by() returns a new queryset
return list_detail.object_list(request, queryset, paginate_by=paginate_by,
template_name = template_name,
template_object_name= template_object_name)
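# A hypothetical urlconf hook-up (all names below are illustrative, not
# part of this module):
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('',
#       url(r'^tag/(?P<tag_name>[-\w]+)/$', entry_list,
#           {'queryset': Entry.objects.all(), 'paginate_by': 10,
#            'template_name': 'simple_blog/entry_list.html',
#            'template_object_name': 'entry'}),
#   )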
| {
"repo_name": "jpartogi/django-simple-blog",
"path": "simple_blog/views.py",
"copies": "1",
"size": "1120",
"license": "bsd-3-clause",
"hash": -3122685985055882000,
"line_mean": 40.5185185185,
"line_max": 91,
"alpha_frac": 0.64375,
"autogenerated": false,
"ratio": 3.7583892617449663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9774658260998703,
"avg_score": 0.02549620014925265,
"num_lines": 27
} |
# $Id: views.py 22499ef140b8 2009/09/05 12:08:01 jpartogi $
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.contrib import messages
from .forms import ContactForm
from .models import EmailError
def form(request):
contact = None
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
try:
contact = form.save()
except EmailError:
messages.error(request, _('Verify your info, maybe your email is wrong'))
else:
# TODO: Externalize this? Is i18n good enough?
message = _("Thank you for contacting us. We'll get back to you shortly.")
messages.success(request, message)
else:
form = ContactForm()
return render_to_response('contact_form/form.html', {
'form': form,
'request' : request,
'message' : contact,
}, context_instance=RequestContext(request))
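# A hypothetical way to wire this view up (illustrative only):
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('',
#       url(r'^contact/$', 'contact_form.views.form', name='contact_form'),
#   )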
| {
"repo_name": "jpartogi/django-contact-form",
"path": "contact_form/views.py",
"copies": "1",
"size": "1057",
"license": "bsd-3-clause",
"hash": -1434012539266767600,
"line_mean": 33.0967741935,
"line_max": 90,
"alpha_frac": 0.6291390728,
"autogenerated": false,
"ratio": 4.177865612648222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005217109086172617,
"num_lines": 31
} |
"""
V (Vgroup) API (:mod:`pyhdf.V`)
===============================
A module of the pyhdf package implementing the V (Vgroup)
API of the NCSA HDF4 library.
Introduction
------------
V is one of the modules composing pyhdf, a python package implementing
the NCSA HDF library and letting one manage HDF files from within a python
program. Two versions of the HDF library currently exist, version 4 and
version 5. pyhdf only implements version 4 of the library. Many
different APIs are to be found inside the HDF4 specification.
Currently, pyhdf implements just a few of those: the SD, VS and V APIs.
Other APIs should be added in the future (GR, AN, etc).
The V API supports the definition of vgroups inside an HDF file. A vgroup
can be thought of as a collection of arbitrary "references" to other HDF
objects defined in the same file. A vgroup may hold references to
other vgroups. It is thus possible to organize HDF objects into some sort
of a hierarchy, similar to files grouped into a directory tree under unix.
This vgroup hierarchical nature partly explains the origin of the "HDF"
name (Hierarchical Data Format). vgroups can help logically organize the
contents of an HDF file, for example by grouping together all the datasets
belonging to a given experiment, and subdividing those datasets according
to the day of the experiment, etc.
The V API provides functions to find and access an existing vgroup,
create a new one, delete a vgroup, identify the members of a vgroup, add
and remove members to and from a vgroup, and set and query attributes
on a vgroup. The members of a vgroup are identified through their tags
and reference numbers. Tags are constants identifying each main object type
(dataset, vdata, vgroup). Reference numbers serve to distinguish among
objects of the same type. To add an object to a vgroup, one must first
initialize that object using the API proper to that object type (eg: SD for
a dataset) so as to create a reference number for that object, and then
pass this reference number and the type tag to the V API. When reading the
contents of a vgroup, the V API returns the tags and reference numbers of
the objects composing the vgroup. The user program must then call the
proper API to process each object, based on tag of this object (eg: VS for
a tag identifying a vdata object).
Some limitations of the V API must be stressed. First, HDF imposes
no integrity constraint whatsoever on the contents of a vgroup, nor does it
help maintain such integrity. For example, a vgroup is not strictly
hierarchical, because an object can belong to more than one vgroup. It would
be easy to create vgroups showing cycles among their members. Also, a vgroup
member is simply a reference to an HDF object. If this object is afterwards
deleted for any reason, the vgroup membership will not be automatically
updated. The vgroup will refer to a non-existent object and thus be left
in an inconsistent state. Nothing prevents adding the same member more than
once to a vgroup, and giving the same name to more than one vgroup.
Finally, the HDF library seems to make heavy use of vgroups for its own
internal needs, and creates vgroups "behind the scenes". This may make it
difficult to pick up "user defined" vgroups when browsing an HDF file.
Accessing the V module
-----------------------
To access the V module a python program can say one of:
>>> import pyhdf.V # must prefix names with "pyhdf.V."
>>> from pyhdf import V # must prefix names with "V."
>>> from pyhdf.V import * # names need no prefix
This document assumes the last import style is used.
V is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
>>> from .HDF import *
Package components
------------------
pyhdf is a proper Python package, i.e. a collection of modules stored under
a directory whose name is that of the package and which stores an
__init__.py file. Following the normal installation procedure, this
directory will be <python-lib>/site-packages/pyhdf, where <python-lib>
stands for the python installation directory.
For each HDF API exists a corresponding set of modules.
The following modules are related to the V API.
_hdfext
C extension module responsible for wrapping the HDF
C library for all python modules
hdfext
python module implementing some utility functions
complementing the _hdfext extension module
error
defines the HDF4Error exception
HDF
python module providing support to the V module
V
python module wrapping the V API routines inside
an OOP framework
_hdfext and hdfext were generated using the SWIG preprocessor.
SWIG is however *not* needed to run the package. Those two modules
are meant to do their work in the background, and should never be called
directly. Only HDF and V should be imported by the user program.
Prerequisites
-------------
The following software must be installed in order for the V module to
work.
HDF (v4) library
pyhdf does *not* include the HDF4 library, which must
be installed separately.
HDF is available at:
"https://portal.hdfgroup.org/display/support/Download+HDF4".
Numeric is also needed by the SD module. See the SD module documentation.
Summary of differences between the pyhdf and C V API
-----------------------------------------------------
Most of the differences between the pyhdf and C V API can
be summarized as follows.
-In the C API, every function returns an integer status code, and values
computed by the function are returned through one or more pointers
passed as arguments.
-In pyhdf, error statuses are returned through the Python exception
mechanism, and values are returned as the method result. When the
C API specifies that multiple values are returned, pyhdf returns a
sequence of values, which are ordered similarly to the pointers in the
C function argument list.
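For example, instead of checking an integer status code as C code would,
a pyhdf program relies on return values and exceptions (a sketch; 'vg'
stands for an attached VG instance)::

    name = vg._name          # the value is simply the result
    tag, ref = vg.tagref(0)  # multiple values come back as a sequence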
Error handling
--------------
All errors reported by the C V API with a SUCCESS/FAIL error code
are reported by pyhdf using the Python exception mechanism.
When the C library reports a FAIL status, pyhdf raises an HDF4Error
exception (a subclass of Exception) with a descriptive message.
Unfortunately, the C library is rarely informative about the cause of
the error. pyhdf does its best to try to document the error, but most
of the time cannot do more than saying "execution error".
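As an illustration, a failed lookup can be caught as follows (a sketch,
assuming 'v' is an initialized V instance)::

    try:
        ref = v.find('no such vgroup')
    except HDF4Error as msg:
        print("lookup failed:", msg)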
V needs support from the HDF module
------------------------------------
The V module is not self-contained (contrary to the SD module).
It requires help from the HDF module, namely:
- the HDF.HDF class to open and close the HDF file, and initialize the
V interface
- the HDF.HC class to provide different sorts of constants (opening modes,
data types, etc).
A program wanting to access HDF vgroups will almost always need to execute
the following minimal set of calls:
>>> from pyhdf.HDF import *
>>> from pyhdf.V import *
>>> hdfFile = HDF(name, HC.xxx)# open HDF file
>>> v = hdfFile.vgstart() # initialize V interface on HDF file
>>> ... # manipulate vgroups
>>> v.end() # terminate V interface
>>> hdfFile.close() # close HDF file
Classes summary
---------------
pyhdf wraps the V API using the following python classes::
V HDF V interface
VG vgroup
VGAttr vgroup attribute
In more detail::
V The V class implements the V (Vgroup) interface applied to an
HDF file.
To instantiate a V class, call the vgstart() method of an
HDF instance.
methods:
constructors
attach() open an existing vgroup given its name or its
reference number, or create a new vgroup,
returning a VG instance for that vgroup
create() create a new vgroup, returning a VG instance
for that vgroup
closing the interface
end() close the V interface on the HDF file
deleting a vgroup
delete() delete the vgroup identified by its name or
its reference number
searching
find() find a vgroup given its name, returning
the vgroup reference number
findclass() find a vgroup given its class name, returning
the vgroup reference number
getid() return the reference number of the vgroup
following the one with the given reference number
VG The VG class encapsulates the functionality of a vgroup.
To instantiate a VG class, call the attach() or create() methods
of a V class instance.
constructors
attr() return a VGAttr instance representing an attribute
of the vgroup
findattr() search the vgroup for a given attribute,
returning a VGAttr instance for that attribute
ending access to a vgroup
detach() terminate access to the vgroup
adding a member to a vgroup
add() add to the vgroup the HDF object identified by its
tag and reference number
insert() insert a vdata or a vgroup in the vgroup, given
the vdata or vgroup instance
deleting a member from a vgroup
delete() remove from the vgroup the HDF object identified
by the given tag and reference number
querying vgroup
attrinfo() return info about all the vgroup attributes
inqtagref() determine if the HDF object with the given
tag and reference number belongs to the vgroup
isvg() determine if the member with the given reference
number is a vgroup object
isvs() determine if the member with the given reference
number is a vdata object
nrefs() return the number of vgroup members with the
given tag
tagref() get the tag and reference number of a vgroup
member, given the index number of that member
tagrefs() get the tags and reference numbers of all the
vgroup members
VGAttr The VGAttr class provides methods to set and query vgroup
attributes.
To create an instance of this class, call the attr() method
of a VG instance.
Remember that vgroup attributes can also be set and queried by
applying the standard python "dot notation" on a VG instance.
get attribute value(s)
get() obtain the attribute value(s)
set attribute value(s)
set() set the attribute to the given value(s) of the
given type, first creating the attribute if
necessary
query attribute info
info() retrieve attribute name, data type, order and
size
Attribute access: low and high level
------------------------------------
The V API allows setting attributes on vgroups. Attributes can be of many
types (int, float, char) of different bit lengths (8, 16, 32, 64 bits),
and can be single or multi-valued. Values of a multi-valued attribute must
all be of the same type.
Attributes can be set and queried in two different ways. First, given a
VG instance (describing a vgroup object), the attr() method of that instance
is called to create a VGAttr instance representing the wanted attribute
(possibly non existent). The set() method of this VGAttr instance is then
called to define the attribute value, creating it if it does not already
exist. The get() method returns the current attribute value. Here is an
example.
>>> from pyhdf.HDF import *
>>> from pyhdf.V import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> v = f.vgstart() # init vgroup interface
>>> vg = v.attach('vtest', 1) # attach vgroup 'vtest' in write mode
>>> attr = vg.attr('version') # prepare to define the 'version' attribute
# on the vgroup
>>> attr.set(HC.CHAR8,'1.0') # set attribute 'version' to string '1.0'
>>> print(attr.get()) # get and print attribute value
>>> attr = vg.attr('range') # prepare to define attribute 'range'
>>> attr.set(HC.INT32,(-10, 15)) # set attribute 'range' to a pair of ints
>>> print(attr.get()) # get and print attribute value
>>> vg.detach() # "close" the vgroup
>>> v.end() # terminate the vgroup interface
>>> f.close() # close the HDF file
The second way consists of setting/querying an attribute as if it were a
normal python class attribute, using the usual dot notation. Above example
then becomes:
>>> from pyhdf.HDF import *
>>> from pyhdf.V import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> v = f.vgstart() # init vgroup interface
>>> vg = v.attach('vtest', 1) # attach vgroup 'vtest' in write mode
>>> vg.version = '1.0' # create vgroup attribute 'version',
# setting it to string '1.0'
>>> print(vg.version) # print attribute value
>>> vg.range = (-10, 15) # create attribute 'range', setting
# it to the pair of ints (-10, 15)
>>> print(vg.range) # print attribute value
>>> vg.detach() # "close" the vgroup
>>> v.end() # terminate the vgroup interface
>>> f.close() # close the HDF file
Note how the dot notation greatly simplifies and clarifies the code.
Some latitude is however lost by manipulating attributes in that way,
because the pyhdf package, not the programmer, is then responsible for
setting the attribute type. The attribute type is chosen to be one of:
=========== ====================================
HC.CHAR8 if the attribute value is a string
HC.INT32 if all attribute values are integers
HC.FLOAT64 otherwise
=========== ====================================
The first way of handling attribute values must be used if one wants to
define an attribute of any other type (for ex. 8 or 16 bit integers,
signed or unsigned). Also, only a VGAttr instance gives access to attribute
info, through its info() method.
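For example, to force a 16-bit integer attribute instead of the default
HC.INT32 chosen by the dot notation (a sketch; 'vg' is an attached VG
instance)::

    att = vg.attr('level')
    att.set(HC.INT16, 3)   # explicit data type
    print(att.info())      # name, data type, order, size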
However, accessing HDF attributes as if they were python attributes raises
an important issue. There must exist a way to assign generic attributes
to the python objects without requiring those attributes to be converted
to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" HDF attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
the object dictionary: the python dir() function will not list them.
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
for attributes holding string values. An attribute initialized with an
'n' character string is simply a character attribute of order 'n' (eg a
character array of length 'n'). If 'vg' is a vgroup and we initialize its
'a1' attribute as 'vg.a1 = "abcdef"', then a subsequent update attempt
like 'vg.a1 = "12"' will fail, because we then try to change the order
of the attribute (from 6 to 2). It is mandatory to keep the length of string
attributes constant.
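In code, the rule just described plays out like this (a sketch)::

    vg.a1 = "abcdef"   # creates a CHAR8 attribute of order 6
    vg.a1 = "ghijkl"   # legal: same type and order
    vg.a1 = "12"       # fails: order would change from 6 to 2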
Predefined attributes
---------------------
The VG class supports predefined attributes to get (and occasionally set)
attribute values easily using the usual python "dot notation", without
having to call a class method. The names of predefined attributes all start
with an underscore ('_').
In the following table, the RW column holds an X if the attribute
is read/write.
VG predefined attributes
=========== === =========================== ===================
name RW description C library routine
=========== === =========================== ===================
_class X class name Vgetclass/Vsetclass
_name X vgroup name Vgetname/Vsetname
_nattrs number of vgroup attributes Vnattrs
_nmembers number of vgroup members Vntagrefs
_refnum vgroup reference number VQueryref
_tag vgroup tag VQuerytag
_version vgroup version number Vgetversion
=========== === =========================== ===================
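For example (a sketch; 'vg' is an attached VG instance)::

    print(vg._name, vg._nmembers)  # read two predefined attributes
    vg._class = 'experiment'       # _class is one of the writable ones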
Programming models
------------------
Creating and initializing a vgroup
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following program shows how to create and initialize a vgroup inside
an HDF file. It can serve as a model for any program wanting to create
a vgroup::
from pyhdf.HDF import *
from pyhdf.V import *
from pyhdf.VS import *
from pyhdf.SD import *
def vdatacreate(vs, name):
# Create vdata and define its structure
vd = vs.create(name,
(('partid',HC.CHAR8, 5), # 5 char string
('description',HC.CHAR8, 10), # 10 char string field
('qty',HC.INT16, 1), # 1 16 bit int field
('wght',HC.FLOAT32, 1), # 1 32 bit float
('price',HC.FLOAT32,1) # 1 32 bit float
))
# Store records
vd.write((('Q1234', 'bolt',12, 0.01, 0.05), # record 1
('B5432', 'brush', 10, 0.4, 4.25), # record 2
('S7613', 'scissor', 2, 0.2, 3.75) # record 3
))
# "close" vdata
vd.detach()
def sdscreate(sd, name):
# Create a simple 3x3 float array.
sds = sd.create(name, SDC.FLOAT32, (3,3))
# Initialize array
sds[:] = ((0,1,2),(3,4,5),(6,7,8))
# "close" dataset.
sds.endaccess()
# Create HDF file
filename = 'inventory.hdf'
hdf = HDF(filename, HC.WRITE|HC.CREATE)
# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename, SDC.WRITE) # SD interface
vs = hdf.vstart() # vdata interface
v = hdf.vgstart() # vgroup interface
# Create vdata named 'INVENTORY'.
vdatacreate(vs, 'INVENTORY')
# Create dataset named "ARR_3x3"
sdscreate(sd, 'ARR_3x3')
# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')
# Create vgroup named 'TOTAL'.
vg = v.create('TOTAL')
# Add vdata to the vgroup
vg.insert(vd)
# We could also have written this:
# vgroup.add(vd._tag, vd._refnum)
# or this:
# vgroup.add(HC.DFTAG_VH, vd._refnum)
# Add dataset to the vgroup
vg.add(HC.DFTAG_NDG, sds.ref())
# Close vgroup, vdata and dataset.
vg.detach() # vgroup
vd.detach() # vdata
sds.endaccess() # dataset
# Terminate V, VS and SD interfaces.
v.end() # V interface
vs.end() # VS interface
sd.end() # SD interface
# Close HDF file.
hdf.close()
The program starts by defining two functions vdatacreate() and sdscreate(),
which will serve to create the vdata and dataset objects we need. Those
functions are not essential to the example. They simply help to make the
example self-contained. Refer to the VS and SD module documentation for
additional explanations about how these functions work.
After opening the HDF file in write mode, the SD, V and VS interfaces are
initialized on the file. Next vdatacreate() is called to create a new vdata
named 'INVENTORY' on the VS instance, and sdscreate() to create a new
dataset named 'ARR_3x3' on the SD instance. This is done so that we have a
vdata and a dataset to play with.
The vdata and the dataset are then attached ("opened"). The create()
method of the V instance is then called to create a new vgroup named
'TOTAL'. The vgroup is then populated by calling its insert() method to add
the vdata 'INVENTORY', and its add() method to add the 'ARR_3x3' dataset.
Note that insert() is just a convenience method that simplifies adding a
vdata or a vgroup to a vgroup, avoiding the need to pass an object tag and
reference number. There is no such convenience method for adding a dataset
to a vgroup. The dataset must be added by specifying its tag and reference
number. Note that the tags to be used are defined inside the HDF module as
constants of the HC class: DFTAG_NDG for a dataset, DFTAG_VG for a vgroup,
DFTAG_VH for a vdata.
The program ends by detaching ("closing") the HDF objects created above,
terminating the three interfaces initialized, and closing the HDF file.
Reading a vgroup
^^^^^^^^^^^^^^^^
The following program shows the contents of the vgroups contained inside
any HDF file::
from pyhdf.HDF import *
from pyhdf.V import *
from pyhdf.VS import *
from pyhdf.SD import *
import sys
def describevg(refnum):
# Describe the vgroup with the given refnum.
# Open vgroup in read mode.
vg = v.attach(refnum)
print "----------------"
print "name:", vg._name, "class:",vg._class, "tag,ref:",
print vg._tag, vg._refnum
# Show the number of members of each main object type.
print "members: ", vg._nmembers,
print "datasets:", vg.nrefs(HC.DFTAG_NDG),
print "vdatas: ", vg.nrefs(HC.DFTAG_VH),
print "vgroups: ", vg.nrefs(HC.DFTAG_VG)
# Read the contents of the vgroup.
members = vg.tagrefs()
# Display info about each member.
index = -1
for tag, ref in members:
index += 1
print "member index", index
# Vdata tag
if tag == HC.DFTAG_VH:
vd = vs.attach(ref)
nrecs, intmode, fields, size, name = vd.inquire()
print " vdata:",name, "tag,ref:",tag, ref
print " fields:",fields
print " nrecs:",nrecs
vd.detach()
# SDS tag
elif tag == HC.DFTAG_NDG:
sds = sd.select(sd.reftoindex(ref))
name, rank, dims, type, nattrs = sds.info()
print " dataset:",name, "tag,ref:", tag, ref
print " dims:",dims
print " type:",type
sds.endaccess()
# Vgroup tag
elif tag == HC.DFTAG_VG:
vg0 = v.attach(ref)
print(" vgroup:", vg0._name, "tag,ref:", tag, ref)
vg0.detach()
# Unhandled tag
else:
print "unhandled tag,ref",tag,ref
# Close vgroup
vg.detach()
# Open HDF file in readonly mode.
filename = sys.argv[1]
hdf = HDF(filename)
# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename)
vs = hdf.vstart()
v = hdf.vgstart()
# Scan all vgroups in the file.
ref = -1
while 1:
try:
ref = v.getid(ref)
except HDF4Error as msg: # no more vgroups
break
describevg(ref)
# Terminate V, VS and SD interfaces.
v.end()
vs.end()
sd.end()
# Close HDF file.
hdf.close()
The program starts by defining function describevg(), which is passed the
reference number of the vgroup to display. The function assumes that the
SD, VS and V interfaces have been previously initialized.
The function starts by attaching ("opening") the vgroup, and displaying
its name, class, tag and reference number. The number of members of the
three most important object types is then displayed, by calling the nrefs()
method with the predefined tags found inside the HDF.HC class.
The tagrefs() method is then called to get a list of all the vgroup members,
each member being identified by its tag and reference number. A 'for'
statement is entered to loop over each element of this list. The tag is
tested against the known values defined in the HDF.HC class: the outcome of
this test indicates how to process the member object.
A DFTAG_VH tag indicates we deal with a vdata. The vdata is attached, its
inquire() method called to display info about it, and the vdata is detached.
In the case of a DFTAG_NDG, we are facing a dataset. The dataset is
selected, info is obtained by calling the dataset info() method, and the
dataset is released. A DFTAG_VG indicates that the member is a vgroup. We
attach it, print its name, tag and reference number, then detach the
member vgroup. A warning is finally displayed if we hit upon a member of
an unknown type.
The function releases the vgroup just displayed and returns.
The main program starts by opening in readonly mode the HDF file passed
as argument on the command line. The SD, VS and V interfaces are
initialized, and the corresponding class instances are stored inside 'sd',
'vs' and 'v' global variables, respectively, for the use of the
describevg() function.
A while loop is then entered to access each vgroup in the file. A reference
number of -1 is passed on the first call to getid() to obtain the reference
number of the first vgroup. getid() returns a new reference number on each
subsequent call, and raises an exception when the last vgroup has been
retrieved. This exception is caught to break out of the loop, otherwise
describevg() is called to display the vgroup we have on hand.
Once the loop is over, the interfaces initialized before are terminated,
and the HDF file is closed.
You will notice that this program will display vgroups other than those
you have explicitly created. Those supplementary vgroups are created
by the HDF library for its own internal needs.
"""
import os, sys, types
from . import hdfext as _C
from .six.moves import xrange
from .HC import HC
from .VS import VD
from .error import HDF4Error, _checkErr
# List of names we want to be imported by an "from pyhdf.V import *"
# statement
__all__ = ['V', 'VG', 'VGAttr']
class V(object):
"""The V class implements the V (Vgroup) interface applied to an
HDF file.
To instantiate a V class, call the vgstart() method of an
HDF instance. """
def __init__(self, hinst):
# Not to be called directly by the user.
# A V object is instantiated using the vgstart()
# method of an HDF instance.
# Args:
# hinst HDF instance
# Returns:
# A V instance
#
# C library equivalent : Vstart (rather: Vinitialize)
# Private attributes:
# _hdf_inst: HDF instance
# Note: Vstart is just a macro; use 'Vinitialize' instead
# Note also that the same C function is used to initialize the
# VS interface.
status = _C.Vinitialize(hinst._id)
_checkErr('V', status, "cannot initialize V interface")
self._hdf_inst = hinst
def __del__(self):
"""Delete the instance, first calling the end() method
if not already done. """
try:
if self._hdf_inst:
self.end()
except:
pass
def end(self):
"""Close the V interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
# Note also that the same C function is used to end
# the VS interface
_checkErr('vend', _C.Vfinish(self._hdf_inst._id),
"cannot terminate V interface")
self._hdf_inst = None
def attach(self, num_name, write=0):
"""Open an existing vgroup given its name or its reference
number, or create a new vgroup, returning a VG instance for
that vgroup.
Args::
num_name reference number or name of the vgroup to open,
or -1 to create a new vgroup; vcreate() can also
be called to create and name a new vgroup
write set to non-zero to open the vgroup in write mode
and to 0 to open it in readonly mode (default)
Returns::
VG instance for the vgroup
An exception is raised if an attempt is made to open
a non-existent vgroup.
C library equivalent : Vattach
"""
if isinstance(num_name, bytes):
num = self.find(num_name)
else:
num = num_name
vg_id = _C.Vattach(self._hdf_inst._id, num,
write and 'w' or 'r')
_checkErr('vattach', vg_id, "cannot attach Vgroup")
return VG(self, vg_id)
def create(self, name):
"""Create a new vgroup, and assign it a name.
Args::
name name to assign to the new vgroup
Returns::
VG instance for the new vgroup
A create(name) call is equivalent to an attach(-1, 1) call,
followed by a call to the setname(name) method of the instance.
C library equivalent : no equivalent
"""
vg = self.attach(-1, 1)
vg._name = name
return vg
def find(self, name):
"""Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfind(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum
def findclass(self, name):
"""Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfindclass(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum
def delete(self, num_name):
"""Delete from the HDF file the vgroup identified by its
reference number or its name.
Args::
num_name either the reference number or the name of
the vgroup to delete
Returns::
None
C library equivalent : Vdelete
"""
try:
vg = self.attach(num_name, 1)
except HDF4Error as msg:
raise HDF4Error("delete: no such vgroup")
# ATTENTION: The HDF documentation says that the vgroup_id
# is passed to Vdelete(). This is wrong.
# The vgroup reference number must instead be passed.
refnum = vg._refnum
vg.detach()
_checkErr('delete', _C.Vdelete(self._hdf_inst._id, refnum),
"error deleting vgroup")
def getid(self, ref):
"""Obtain the reference number of the vgroup following the
vgroup with the given reference number.
Args::
ref reference number of the vgroup after which to search;
set to -1 to start the search at the start of
the HDF file
Returns::
reference number of the vgroup past the one identified by 'ref'
An exception is raised if the end of the vgroup list is reached.
C library equivalent : Vgetid
"""
num = _C.Vgetid(self._hdf_inst._id, ref)
_checkErr('getid', num, "bad arguments or last vgroup reached")
return num
class VG(object):
"""The VG class encapsulates the functionnality of a vgroup.
To instantiate a VG class, call the attach() or create() methods
of a V class instance."""
def __init__(self, vinst, id):
# This constructor is not intended to be called directly
# by the user program. The attach() method of an
# V class instance should be called instead.
# Arg:
# vinst V instance from which the call is made
# id vgroup identifier
# Private attributes:
# _v_inst V instance to which the vgroup belongs
# _id vgroup identifier
self._v_inst = vinst
self._id = id
def __del__(self):
"""Delete the instance, first calling the detach() method
if not already done. """
try:
if self._id:
self.detach()
except:
pass
def __getattr__(self, name):
"""Some vgroup properties can be queried/set through the following
attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes. Most are read-only.
Only the _class and _name attributes can be modified.
Name RO Description C library routine
----- -- ----------------- -----------------
_class class name Vgetclass/Vsetclass
_name name Vgetname/Vsetname
_nattrs X number of attributes Vnattrs
_nmembers X number of vgroup members Vntagrefs
_refnum X reference number VQueryref
_tag X tag VQuerytag
_version X version number Vgetversion
"""
# NOTE: python will call this method only if the attribute
# is not found in the object dictionary.
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute
if name == "_class":
status, nm = _C.Vgetclass(self._id)
_checkErr('_class', status, 'cannot get vgroup class')
return nm
elif name == "_name":
status, nm = _C.Vgetname(self._id)
_checkErr('_name', status, 'cannot get vgroup name')
return nm
elif name == "_nattrs":
n = _C.Vnattrs(self._id)
_checkErr('_nattrs', n, 'cannot get number of attributes')
return n
elif name == "_nmembers":
n = _C.Vntagrefs(self._id)
_checkErr('_nmembers', n, 'cannot get vgroup number of members')
return n
elif name == "_refnum":
n = _C.VQueryref(self._id)
_checkErr('refnum', n, 'cannot get vgroup reference number')
return n
elif name == "_tag":
n = _C.VQuerytag(self._id)
_checkErr('_tag', n, 'cannot get vgroup tag')
return n
elif name == "_version":
n = _C.Vgetversion(self._id)
_checkErr('_version', n, 'cannot get vgroup version')
return n
else:
raise AttributeError
def __setattr__(self, name, value):
# A name starting with an underscore will be treated as
# a standard python attribute, and as an HDF attribute
# otherwise.
# Forbid assigning to readonly attributes
if name in ["_nattrs", "_nmembers", "_refnum", "_tag", "_version"]:
raise AttributeError("%s: read-only attribute" % name)
# Read-write predefined attributes
elif name == "_class":
_checkErr(name, _C.Vsetclass(self._id, value),
'cannot set _class property')
elif name == "_name":
_checkErr(name, _C.Vsetname(self._id, value),
'cannot set _name property')
# Try to set a user-defined attribute.
else:
_setattr(self, name, value)
def insert(self, inst):
"""Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
"""
if isinstance(inst, VD):
id = inst._id
elif isinstance(inst, VG):
id = inst._id
else:
raise HDF4Error("insrt: bad argument")
index = _C.Vinsert(self._id, id)
_checkErr('insert', index, "cannot insert in vgroup")
return index
def add(self, tag, ref):
"""Add to the vgroup an object identified by its tag and
reference number.
Args::
tag tag of the object to add
ref reference number of the object to add
Returns::
total number of objects in the vgroup after the addition
C library equivalent : Vaddtagref
"""
n = _C.Vaddtagref(self._id, tag, ref)
_checkErr('addtagref', n, 'invalid arguments')
return n
def delete(self, tag, ref):
"""Delete from the vgroup the member identified by its tag
and reference number.
Args::
tag tag of the member to delete
ref reference number of the member to delete
Returns::
None
Only the link of the member with the vgroup is deleted.
The member object is not deleted.
C library equivalent : Vdeletetagref
"""
_checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
"error deleting member")
def detach(self):
"""Terminate access to the vgroup.
Args::
no argument
Returns::
None
C library equivalent : Vdetach
"""
_checkErr('detach', _C.Vdetach(self._id),
"cannot detach vgroup")
self._id = None
def tagref(self, index):
"""Get the tag and reference number of a vgroup member,
given the index number of that member.
Args::
index member index (0 based)
Returns::
2-element tuple:
- member tag
- member reference number
C library equivalent : Vgettagref
"""
status, tag, ref = _C.Vgettagref(self._id, index)
_checkErr('tagref', status, "illegal arguments")
return tag, ref
def tagrefs(self):
"""Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs
"""
n = self._nmembers
ret = []
if n:
tags = _C.array_int32(n)
refs = _C.array_int32(n)
k = _C.Vgettagrefs(self._id, tags, refs, n)
_checkErr('tagrefs', k, "error getting tags and refs")
for m in xrange(k):
ret.append((tags[m], refs[m]))
return ret
def inqtagref(self, tag, ref):
"""Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref
"""
return _C.Vinqtagref(self._id, tag, ref)
def nrefs(self, tag):
"""Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs
"""
n = _C.Vnrefs(self._id, tag)
_checkErr('nrefs', n, "bad arguments")
return n
def isvg(self, ref):
"""Determines if the member of a vgoup is a vgroup.
Args::
ref reference number of the member to check
Returns::
False (0) if the member is not a vgroup
True (1) otherwise
C library equivalent : Visvg
"""
return _C.Visvg(self._id, ref)
def isvs(self, ref):
"""Determines if the member of a vgoup is a vdata.
Args::
ref reference number of the member to check
Returns::
False (0) if the member is not a vdata,
True (1) otherwise
C library equivalent : Visvs
"""
return _C.Visvs(self._id, ref)
def attr(self, name_index):
"""Create a VGAttr instance representing a vgroup attribute.
Args::
name_index attribute name or attribute index number; if a
name is given the attribute may not exist; in that
case, it will be created when the VGAttr
instance set() method is called
Returns::
VGAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VGAttr(self, name_index)
def attrinfo(self):
"""Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
def findattr(self, name):
"""Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
class VGAttr(object):
"""The VGAttr class encapsulates methods used to set and query
attributes defined on a vgroup. To create an instance of this class,
call the attr() method of a VG class. """
def __init__(self, obj, name_or_index):
# This constructor should not be called directly by the user
# program. The attr() method of a VG class must be called to
# instantiate this class.
# Args:
# obj VG instance to which the attribute belongs
# name_or_index name or index of the attribute; if a name is
# given, an attribute with that name will be
# searched, if not found, a new index number will
# be generated
# Private attributes:
# _v_inst V instance
# _index attribute index or None
# _name attribute name or None
self._v_inst = obj
# Name is given. Attribute may exist or not.
if isinstance(name_or_index, type('')):
self._name = name_or_index
self._index = _C.Vfindattr(self._v_inst._id, self._name)
if self._index < 0:
self._index = None
# Index is given. Attribute must exist.
else:
self._index = name_or_index
status, self._name, data_type, n_values, size = \
_C.Vattrinfo(self._v_inst._id, self._index)
_checkErr('attr', status, 'non-existent attribute')
def get(self):
"""Retrieve the attribute value.
Args::
no argument
Returns::
attribute value(s); a list is returned if the attribute
is made up of more than one value, except in the case of a
string-valued attribute (data type HC.CHAR8) where the
values are returned as a string
Note that a vgroup attribute can also be queried like a standard
python class attribute by applying the usual "dot notation" to a
VG instance.
C library equivalent : Vgetattr
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
# Obtain attribute type and the number of values.
status, aName, data_type, n_values, size = \
_C.Vattrinfo(self._v_inst._id, self._index)
_checkErr('get', status, 'illegal parameters')
# Get attribute value.
convert = _array_to_ret
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("get: attribute index %d has an "\
"illegal or unupported type %d" % \
(self._index, data_type))
status = _C.Vgetattr(self._v_inst._id, self._index, buf)
_checkErr('get', status, 'illegal attribute ')
return convert(buf, n_values)
def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
Note that a vgroup attribute can also be set like a standard
python class attribute by applying the usual "dot notation" to a
VG instance.
C library equivalent : Vsetattr
"""
try:
n_values = len(values)
except TypeError:   # 'values' was passed as a scalar
values = [values]
n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.Vsetattr(self._v_inst._id, self._name, data_type,
n_values, buf)
_checkErr('set', status, 'cannot execute')
# Update the attribute index
self._index = _C.Vfindattr(self._v_inst._id, self._name)
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index")
def info(self):
"""Retrieve info about the attribute.
Args::
no argument
Returns::
4-element tuple with the following components:
-attribute name
-attribute data type (one of HC.xxx constants)
-attribute order (number of values)
-attribute size in bytes
C library equivalent : Vattrinfo
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
status, name, type, order, size = \
_C.Vattrinfo(self._v_inst._id, self._index)
_checkErr('info', status, "execution error")
return name, type, order, size
###########################
# Support functions
###########################
def _setattr(obj, name, value):
# Called by the __setattr__ method of the VG object.
#
# obj instance on which the attribute is set
# name attribute name
# value attribute value
# Treat a name starting with an underscore as that of a
# standard python instance attribute.
if name[0] == '_':
obj.__dict__[name] = value
return
# Treat everything else as an HDF attribute.
if type(value) not in [list, tuple]:
value = [value]
typeList = []
for v in value:
t = type(v)
# Prohibit mixing numeric types and strings.
if t in [int, float] and \
not bytes in typeList:
if t not in typeList:
typeList.append(t)
# Prohibit sequence of strings or a mix of numbers and string.
elif t == bytes and not typeList:
typeList.append(t)
else:
typeList = []
break
if bytes in typeList:
xtype = HC.CHAR8
value = value[0]
# double is "stronger" than int
elif float in typeList:
xtype = HC.FLOAT64
elif int in typeList:
xtype = HC.INT32
else:
raise HDF4Error("Illegal attribute value")
# Assign value
try:
a = obj.attr(name)
a.set(xtype, value)
except HDF4Error as msg:
raise HDF4Error("cannot set attribute: %s" % msg)
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
for i in xrange(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
# Strip NULL at end
if chrs[-1] == '\0':
del chrs[-1]
return ''.join(chrs)
| {
"repo_name": "fhs/python-hdf4",
"path": "pyhdf/V.py",
"copies": "1",
"size": "53723",
"license": "mit",
"hash": -4833337207382305000,
"line_mean": 33.0665821179,
"line_max": 76,
"alpha_frac": 0.5839398395,
"autogenerated": false,
"ratio": 4.266100214404828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5350040053904828,
"avg_score": null,
"num_lines": null
} |
"""
V (Vgroup) API (:mod:`pyhdf.V`)
===============================
A module of the pyhdf package implementing the V (Vgroup)
API of the NCSA HDF4 library.
(see: hdf.ncsa.uiuc.edu)
Introduction
------------
V is one of the modules composing pyhdf, a python package implementing
the NCSA HDF library and letting one manage HDF files from within a python
program. Two versions of the HDF library currently exist, version 4 and
version 5. pyhdf only implements version 4 of the library. Many
different APIs are to be found inside the HDF4 specification.
Currently, pyhdf implements just a few of those: the SD, VS and V APIs.
Other APIs should be added in the future (GR, AN, etc).
The V API supports the definition of vgroups inside an HDF file. A vgroup
can be thought of as a collection of arbitrary "references" to other HDF
objects defined in the same file. A vgroup may hold references to
other vgroups. It is thus possible to organize HDF objects into some sort
of a hierarchy, similar to files grouped into a directory tree under unix.
This vgroup hierarchical nature partly explains the origin of the "HDF"
name (Hierarchical Data Format). vgroups can help logically organize the
contents of an HDF file, for example by grouping together all the datasets
belonging to a given experiment, and subdividing those datasets according
to the day of the experiment, etc.
The V API provides functions to find and access an existing vgroup,
create a new one, delete a vgroup, identify the members of a vgroup, add
and remove members to and from a vgroup, and set and query attributes
on a vgroup. The members of a vgroup are identified through their tags
and reference numbers. Tags are constants identifying each main object type
(dataset, vdata, vgroup). Reference numbers serve to distinguish among
objects of the same type. To add an object to a vgroup, one must first
initialize that object using the API proper to that object type (eg: SD for
a dataset) so as to create a reference number for that object, and then
pass this reference number and the type tag to the V API. When reading the
contents of a vgroup, the V API returns the tags and reference numbers of
the objects composing the vgroup. The user program must then call the
proper API to process each object, based on tag of this object (eg: VS for
a tag identifying a vdata object).
Some limitations of the V API must be stressed. First, HDF imposes
no integrity constraint whatsoever on the contents of a vgroup, nor does it
help maintain such integrity. For example, a vgroup is not strictly
hierarchical, because an object can belong to more than one vgroup. It would
be easy to create vgroups showing cycles among their members. Also, a vgroup
member is simply a reference to an HDF object. If this object is afterwards
deleted for any reason, the vgroup membership will not be automatically
updated. The vgroup will refer to a non-existent object and thus be left
in an inconsistent state. Nothing prevents adding the same member more than
once to a vgroup, and giving the same name to more than one vgroup.
Finally, the HDF library seems to make heavy use of vgroups for its own
internal needs, and creates vgroups "behind the scenes". This may make it
difficult to pick up "user defined" vgroups when browsing an HDF file.
Accessing the V module
-----------------------
To access the V module a python program can say one of:
>>> import pyhdf.V # must prefix names with "pyhdf.V."
>>> from pyhdf import V # must prefix names with "V."
>>> from pyhdf.V import * # names need no prefix
This document assumes the last import style is used.
V is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
>>> from .HDF import *
Package components
------------------
pyhdf is a proper Python package, i.e. a collection of modules stored under
a directory whose name is that of the package and which stores an
__init__.py file. Following the normal installation procedure, this
directory will be <python-lib>/site-packages/pyhdf, where <python-lib>
stands for the python installation directory.
For each HDF API exists a corresponding set of modules.
The following modules are related to the V API.
_hdfext
C extension module responsible for wrapping the HDF
C library for all python modules
hdfext
python module implementing some utility functions
complementing the _hdfext extension module
error
defines the HDF4Error exception
HDF
python module providing support to the V module
V
python module wrapping the V API routines inside
an OOP framework
_hdfext and hdfext were generated using the SWIG preprocessor.
SWIG is however *not* needed to run the package. Those two modules
are meant to do their work in the background, and should never be called
directly. Only HDF and V should be imported by the user program.
Prerequisites
-------------
The following software must be installed in order for the V module to
work.
HDF (v4) library
pyhdf does *not* include the HDF4 library, which must
be installed separately.
HDF is available at:
"http://hdf.ncsa.uiuc.edu/obtain.html".
Numeric is also needed by the SD module. See the SD module documentation.
Summary of differences between the pyhdf and C V API
-----------------------------------------------------
Most of the differences between the pyhdf and C V API can
be summarized as follows.
-In the C API, every function returns an integer status code, and values
computed by the function are returned through one or more pointers
passed as arguments.
-In pyhdf, error statuses are returned through the Python exception
mechanism, and values are returned as the method result. When the
C API specifies that multiple values are returned, pyhdf returns a
sequence of values, which are ordered similarly to the pointers in the
C function argument list.
Error handling
--------------
All errors reported by the C V API with a SUCCESS/FAIL error code
are reported by pyhdf using the Python exception mechanism.
When the C library reports a FAIL status, pyhdf raises an HDF4Error
exception (a subclass of Exception) with a descriptive message.
Unfortunately, the C library is rarely informative about the cause of
the error. pyhdf does its best to try to document the error, but most
of the time cannot do more than saying "execution error".
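As an illustration (a minimal sketch, assuming 'v' is an initialized V
instance on an open HDF file), such an error is caught like any other
python exception:
>>> try:
...     ref = v.find('no_such_vgroup')    # raises HDF4Error if absent
... except HDF4Error as msg:
...     print("find() failed:", msg)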
V needs support from the HDF module
------------------------------------
The V module is not self-contained (contrary to the SD module).
It requires help from the HDF module, namely:
- the HDF.HDF class to open and close the HDF file, and initialize the
V interface
- the HDF.HC class to provide different sorts of constants (opening modes,
data types, etc).
A program wanting to access HDF vgroups will almost always need to execute
the following minimal set of calls:
>>> from pyhdf.HDF import *
>>> from pyhdf.V import *
>>> hdfFile = HDF(name, HC.xxx)# open HDF file
>>> v = hdfFile.vgstart() # initialize V interface on HDF file
>>> ... # manipulate vgroups
>>> v.end() # terminate V interface
>>> hdfFile.close() # close HDF file
Classes summary
---------------
pyhdf wraps the V API using the following python classes::
V HDF V interface
VG vgroup
VGAttr vgroup attribute
In more detail::
V The V class implements the V (Vgroup) interface applied to an
HDF file.
To instantiate a V class, call the vgstart() method of an
HDF instance.
methods:
constructors
attach() open an existing vgroup given its name or its
reference number, or create a new vgroup,
returning a VG instance for that vgroup
create() create a new vgroup, returning a VG instance
for that vgroup
closing the interface
end() close the V interface on the HDF file
deleting a vgroup
delete() delete the vgroup identified by its name or
its reference number
searching
find() find a vgroup given its name, returning
the vgroup reference number
findclass() find a vgroup given its class name, returning
the vgroup reference number
getid() return the reference number of the vgroup
following the one with the given reference number
VG The VG class encapsulates the functionality of a vgroup.
To instantiate a VG class, call the attach() or create() methods
of a V class instance.
constructors
attr() return a VGAttr instance representing an attribute
of the vgroup
findattr() search the vgroup for a given attribute,
returning a VGAttr instance for that attribute
ending access to a vgroup
detach() terminate access to the vgroup
adding a member to a vgroup
add() add to the vgroup the HDF object identified by its
tag and reference number
insert() insert a vdata or a vgroup in the vgroup, given
the vdata or vgroup instance
deleting a member from a vgroup
delete() remove from the vgroup the HDF object identified
by the given tag and reference number
querying vgroup
attrinfo() return info about all the vgroup attributes
inqtagref() determine if the HDF object with the given
tag and reference number belongs to the vgroup
isvg() determine if the member with the given reference
number is a vgroup object
isvs() determine if the member with the given reference
number is a vdata object
nrefs() return the number of vgroup members with the
given tag
tagref() get the tag and reference number of a vgroup
member, given the index number of that member
tagrefs() get the tags and reference numbers of all the
vgroup members
VGAttr The VGAttr class provides methods to set and query vgroup
attributes.
To create an instance of this class, call the attr() method
of a VG instance.
Remember that vgroup attributes can also be set and queried by
applying the standard python "dot notation" on a VG instance.
get attribute value(s)
get() obtain the attribute value(s)
set attribute value(s)
set() set the attribute to the given value(s) of the
given type, first creating the attribute if
necessary
query attribute info
info() retrieve attribute name, data type, order and
size
Attribute access: low and high level
------------------------------------
The V API allows setting attributes on vgroups. Attributes can be of many
types (int, float, char) of different bit lengths (8, 16, 32, 64 bits),
and can be single or multi-valued. Values of a multi-valued attribute must
all be of the same type.
Attributes can be set and queried in two different ways. First, given a
VG instance (describing a vgroup object), the attr() method of that instance
is called to create a VGAttr instance representing the wanted attribute
(possibly non existent). The set() method of this VGAttr instance is then
called to define the attribute value, creating it if it does not already
exist. The get() method returns the current attribute value. Here is an
example.
>>> from pyhdf.HDF import *
>>> from pyhdf.V import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> v = f.vgstart() # init vgroup interface
>>> vg = v.attach('vtest', 1) # attach vgroup 'vtest' in write mode
>>> attr = vg.attr('version') # prepare to define the 'version' attribute
# on the vgroup
>>> attr.set(HC.CHAR8,'1.0') # set attribute 'version' to string '1.0'
>>> print(attr.get()) # get and print attribute value
>>> attr = vg.attr('range') # prepare to define attribute 'range'
>>> attr.set(HC.INT32,(-10, 15)) # set attribute 'range' to a pair of ints
>>> print(attr.get()) # get and print attribute value
>>> vg.detach() # "close" the vgroup
>>> v.end() # terminate the vgroup interface
>>> f.close() # close the HDF file
The second way consists of setting/querying an attribute as if it were a
normal python class attribute, using the usual dot notation. Above example
then becomes:
>>> from pyhdf.HDF import *
>>> from pyhdf.V import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> v = f.vgstart() # init vgroup interface
>>> vg = v.attach('vtest', 1) # attach vgroup 'vtest' in write mode
>>> vg.version = '1.0' # create vgroup attribute 'version',
# setting it to string '1.0'
>>> print(vg.version) # print attribute value
>>> vg.range = (-10, 15) # create attribute 'range', setting
# it to the pair of ints (-10, 15)
>>> print(vg.range) # print attribute value
>>> vg.detach() # "close" the vgroup
>>> v.end() # terminate the vgroup interface
>>> f.close() # close the HDF file
Note how the dot notation greatly simplifies and clarifies the code.
Some latitude is however lost by manipulating attributes in that way,
because the pyhdf package, not the programmer, is then responsible for
setting the attribute type. The attribute type is chosen to be one of:
=========== ====================================
HC.CHAR8 if the attribute value is a string
HC.INT32 if all attribute values are integers
HC.FLOAT64 otherwise
=========== ====================================
The first way of handling attribute values must be used if one wants to
define an attribute of any other type (for ex. 8 or 16 bit integers,
signed or unsigned). Also, only a VGAttr instance gives access to attribute
info, through its info() method.
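For example (a sketch only; the attribute name 'flags' is illustrative,
and 'vg' is assumed to be a vgroup attached in write mode), a 16-bit
integer attribute must be created through a VGAttr instance:
>>> att = vg.attr('flags')           # prepare the attribute
>>> att.set(HC.INT16, [1, 2, 3])     # force the 16-bit integer type
>>> print(att.info())                # name, data type, order, size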
However, accessing HDF attributes as if they were python attributes raises
an important issue. There must exist a way to assign generic attributes
to the python objects without requiring those attributes to be converted
to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" HDF attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
the object dictionary: the python dir() function will not list them.
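For example (illustrative only; 'vg' is an attached vgroup):
>>> vg._comment = 'temp'   # underscore: standard python attribute
>>> vg.comment = 'temp'    # no underscore: HDF attribute of type HC.CHAR8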
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
for attributes holding string values. An attribute initialized with an
'n' character string is simply a character attribute of order 'n' (i.e. a
character array of length 'n'). If 'vg' is a vgroup and we initialize its
'a1' attribute as 'vg.a1 = "abcdef"', then a subsequent update attempt
like 'vg.a1 = "12"' will fail, because we then try to change the order
of the attribute (from 6 to 2). It is mandatory to keep the length of string
attributes constant.
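For example (a sketch; 'vg' is a vgroup attached in write mode):
>>> vg.a1 = "abcdef"   # creates 'a1' as a CHAR8 attribute of order 6
>>> vg.a1 = "ABCDEF"   # legal: the order (6) is unchanged
>>> vg.a1 = "12"       # raises HDF4Error: the order would change to 2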
Predefined attributes
---------------------
The VG class supports predefined attributes to get (and occasionally set)
attribute values easily using the usual python "dot notation", without
having to call a class method. The names of predefined attributes all start
with an underscore ('_').
In the following table, the RW column holds an X if the attribute
is read/write.
VG predefined attributes
=========== === =========================== ===================
name RW description C library routine
=========== === =========================== ===================
_class X class name Vgetclass/Vsetclass
_name X vgroup name Vgetname/Vsetname
_nattrs number of vgroup attributes Vnattrs
_nmembers number of vgroup members Vntagrefs
_refnum vgroup reference number VQueryref
_tag vgroup tag VQuerytag
_version vgroup version number Vgetversion
=========== === =========================== ===================
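For example (a sketch, assuming 'vg' is an attached vgroup):
>>> print(vg._name, vg._refnum)   # query name and reference number
>>> vg._class = 'experiment'      # legal: _class is read/write
>>> vg._nattrs = 2                # raises AttributeError: read-only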
Programming models
------------------
Creating and initializing a vgroup
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following program shows how to create and initialize a vgroup inside
an HDF file. It can serve as a model for any program wanting to create
a vgroup::
from pyhdf.HDF import *
from pyhdf.V import *
from pyhdf.VS import *
from pyhdf.SD import *
def vdatacreate(vs, name):
# Create vdata and define its structure
vd = vs.create(name,
(('partid',HC.CHAR8, 5), # 5 char string
('description',HC.CHAR8, 10), # 10 char string field
('qty',HC.INT16, 1), # 1 16 bit int field
('wght',HC.FLOAT32, 1), # 1 32 bit float
('price',HC.FLOAT32,1) # 1 32 bit float
))
# Store records
vd.write((('Q1234', 'bolt',12, 0.01, 0.05), # record 1
('B5432', 'brush', 10, 0.4, 4.25), # record 2
('S7613', 'scissor', 2, 0.2, 3.75) # record 3
))
# "close" vdata
vd.detach()
def sdscreate(sd, name):
# Create a simple 3x3 float array.
sds = sd.create(name, SDC.FLOAT32, (3,3))
# Initialize array
sds[:] = ((0,1,2),(3,4,5),(6,7,8))
# "close" dataset.
sds.endaccess()
# Create HDF file
filename = 'inventory.hdf'
hdf = HDF(filename, HC.WRITE|HC.CREATE)
# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename, SDC.WRITE) # SD interface
vs = hdf.vstart() # vdata interface
v = hdf.vgstart() # vgroup interface
# Create vdata named 'INVENTORY'.
vdatacreate(vs, 'INVENTORY')
# Create dataset named "ARR_3x3"
sdscreate(sd, 'ARR_3x3')
# Attach the vdata and the dataset.
vd = vs.attach('INVENTORY')
sds = sd.select('ARR_3x3')
# Create vgroup named 'TOTAL'.
vg = v.create('TOTAL')
# Add vdata to the vgroup
vg.insert(vd)
# We could also have written this:
# vg.add(vd._tag, vd._refnum)
# or this:
# vg.add(HC.DFTAG_VH, vd._refnum)
# Add dataset to the vgroup
vg.add(HC.DFTAG_NDG, sds.ref())
# Close vgroup, vdata and dataset.
vg.detach() # vgroup
vd.detach() # vdata
sds.endaccess() # dataset
# Terminate V, VS and SD interfaces.
v.end() # V interface
vs.end() # VS interface
sd.end() # SD interface
# Close HDF file.
hdf.close()
The program starts by defining two functions vdatacreate() and sdscreate(),
which will serve to create the vdata and dataset objects we need. Those
functions are not essential to the example. They simply help to make the
example self-contained. Refer to the VS and SD module documentation for
additional explanations about how these functions work.
After opening the HDF file in write mode, the SD, V and VS interfaces are
initialized on the file. Next vdatacreate() is called to create a new vdata
named 'INVENTORY' on the VS instance, and sdscreate() to create a new
dataset named 'ARR_3x3' on the SD instance. This is done so that we have a
vdata and a dataset to play with.
The vdata and the dataset are then attached ("opened"). The create()
method of the V instance is then called to create a new vgroup named
'TOTAL'. The vgroup is then populated by calling its insert() method to add
the vdata 'INVENTORY', and its add() method to add the 'ARR_3x3' dataset.
Note that insert() is just a convenience method that simplifies adding a
vdata or a vgroup to a vgroup, avoiding the need to pass an object tag and
reference number. There is no such convenience method for adding a dataset
to a vgroup. The dataset must be added by specifying its tag and reference
number. Note that the tags to be used are defined inside the HDF module as
constants of the HC class: DFTAG_NDG for a dataset, DFTAG_VG for a vgroup,
DFTAG_VH for a vdata.
The program ends by detaching ("closing") the HDF objects created above,
terminating the three interfaces initialized, and closing the HDF file.
Reading a vgroup
^^^^^^^^^^^^^^^^
The following program shows the contents of the vgroups contained inside
any HDF file::
from pyhdf.HDF import *
from pyhdf.V import *
from pyhdf.VS import *
from pyhdf.SD import *
import sys
def describevg(refnum):
# Describe the vgroup with the given refnum.
# Open vgroup in read mode.
vg = v.attach(refnum)
print "----------------"
print "name:", vg._name, "class:",vg._class, "tag,ref:",
print vg._tag, vg._refnum
# Show the number of members of each main object type.
print "members: ", vg._nmembers,
print "datasets:", vg.nrefs(HC.DFTAG_NDG),
print "vdatas: ", vg.nrefs(HC.DFTAG_VH),
print "vgroups: ", vg.nrefs(HC.DFTAG_VG)
# Read the contents of the vgroup.
members = vg.tagrefs()
# Display info about each member.
index = -1
for tag, ref in members:
index += 1
print "member index", index
# Vdata tag
if tag == HC.DFTAG_VH:
vd = vs.attach(ref)
nrecs, intmode, fields, size, name = vd.inquire()
print " vdata:",name, "tag,ref:",tag, ref
print " fields:",fields
print " nrecs:",nrecs
vd.detach()
# SDS tag
elif tag == HC.DFTAG_NDG:
sds = sd.select(sd.reftoindex(ref))
name, rank, dims, type, nattrs = sds.info()
print " dataset:",name, "tag,ref:", tag, ref
print " dims:",dims
print " type:",type
sds.endaccess()
# VS tag
elif tag == HC.DFTAG_VG:
vg0 = v.attach(ref)
print " vgroup:", vg0._name, "tag,ref:", tag, ref
vg0.detach()
# Unhandled tag
else:
print "unhandled tag,ref",tag,ref
# Close vgroup
vg.detach()
# Open HDF file in readonly mode.
filename = sys.argv[1]
hdf = HDF(filename)
# Initialize the SD, V and VS interfaces on the file.
sd = SD(filename)
vs = hdf.vstart()
v = hdf.vgstart()
# Scan all vgroups in the file.
ref = -1
while True:
try:
ref = v.getid(ref)
except HDF4Error as msg: # no more vgroups
break
describevg(ref)
# Terminate V, VS and SD interfaces.
v.end()
vs.end()
sd.end()
# Close HDF file.
hdf.close()
The program starts by defining function describevg(), which is passed the
reference number of the vgroup to display. The function assumes that the
SD, VS and V interfaces have been previously initialized.
The function starts by attaching ("opening") the vgroup, and displaying
its name, class, tag and reference number. The number of members of the
three most important object types is then displayed, by calling the nrefs()
method with the predefined tags found inside the HDF.HC class.
The tagrefs() method is then called to get a list of all the vgroup members,
each member being identified by its tag and reference number. A 'for'
statement is entered to loop over each element of this list. The tag is
tested against the known values defined in the HDF.HC class: the outcome of
this test indicates how to process the member object.
A DFTAG_VH tag indicates we deal with a vdata. The vdata is attached, its
inquire() method called to display info about it, and the vdata is detached.
In the case of a DFTAG_NDG, we are facing a dataset. The dataset is
selected, info is obtained by calling the dataset info() method, and the
dataset is released. A DFTAG_VG indicates that the member is a vgroup. We
attach it, print its name, tag and reference number, then detach the
member vgroup. A warning is finally displayed if we hit upon a member of
an unknown type.
The function releases the vgroup just displayed and returns.
The main program starts by opening in readonly mode the HDF file passed
as argument on the command line. The SD, VS and V interfaces are
initialized, and the corresponding class instances are stored inside 'sd',
'vs' and 'v' global variables, respectively, for the use of the
describevg() function.
A while loop is then entered to access each vgroup in the file. A reference
number of -1 is passed on the first call to getid() to obtain the reference
number of the first vgroup. getid() returns a new reference number on each
subsequent call, and raises an exception when the last vgroup has been
retrieved. This exception is caught to break out of the loop, otherwise
describevg() is called to display the vgroup we have on hand.
Once the loop is over, the interfaces initialized before are terminated,
and the HDF file is closed.
You will notice that this program will display vgroups other than those
you have explicitly created. Those supplementary vgroups are created
by the HDF library for its own internal needs.
"""
import os, sys, types
from . import hdfext as _C
from .six.moves import xrange
from .HC import HC
from .VS import VD
from .error import HDF4Error, _checkErr
# List of names we want to be imported by an "from pyhdf.V import *"
# statement
__all__ = ['V', 'VG', 'VGAttr']
class V(object):
"""The V class implements the V (Vgroup) interface applied to an
HDF file.
To instantiate a V class, call the vgstart() method of an
HDF instance. """
def __init__(self, hinst):
# Not to be called directly by the user.
# A V object is instantiated using the vgstart()
# method of an HDF instance.
# Args:
# hinst HDF instance
# Returns:
# A V instance
#
# C library equivalent : Vstart (rather: Vinitialize)
# Private attributes:
# _hdf_inst: HDF instance
# Note: Vstart is just a macro; use 'Vinitialize' instead
# Note also that the same C function is used to initialize the
# VS interface.
status = _C.Vinitialize(hinst._id)
_checkErr('V', status, "cannot initialize V interface")
self._hdf_inst = hinst
def __del__(self):
"""Delete the instance, first calling the end() method
if not already done. """
try:
if self._hdf_inst:
self.end()
except:
pass
def end(self):
"""Close the V interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
# Note also that the same C function is used to end
# the VS interface
_checkErr('vend', _C.Vfinish(self._hdf_inst._id),
"cannot terminate V interface")
self._hdf_inst = None
def attach(self, num_name, write=0):
"""Open an existing vgroup given its name or its reference
number, or create a new vgroup, returning a VG instance for
that vgroup.
Args::
num_name reference number or name of the vgroup to open,
or -1 to create a new vgroup; create() can also
be called to create and name a new vgroup
write set to non-zero to open the vgroup in write mode
and to 0 to open it in readonly mode (default)
Returns::
VG instance for the vgroup
An exception is raised if an attempt is made to open
a non-existent vgroup.
C library equivalent : Vattach
"""
if isinstance(num_name, bytes):
num = self.find(num_name)
else:
num = num_name
vg_id = _C.Vattach(self._hdf_inst._id, num,
write and 'w' or 'r')
_checkErr('vattach', vg_id, "cannot attach Vgroup")
return VG(self, vg_id)
def create(self, name):
"""Create a new vgroup, and assign it a name.
Args::
name name to assign to the new vgroup
Returns::
VG instance for the new vgroup
A create(name) call is equivalent to an attach(-1, 1) call,
followed by setting the _name attribute of the returned instance.
C library equivalent : no equivalent
"""
vg = self.attach(-1, 1)
vg._name = name
return vg
def find(self, name):
"""Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfind(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum
def findclass(self, name):
"""Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfindclass(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum
def delete(self, num_name):
"""Delete from the HDF file the vgroup identified by its
reference number or its name.
Args::
num_name either the reference number or the name of
the vgroup to delete
Returns::
None
C library equivalent : Vdelete
"""
try:
vg = self.attach(num_name, 1)
except HDF4Error as msg:
raise HDF4Error("delete: no such vgroup")
# ATTENTION: The HDF documentation says that the vgroup_id
# is passed to Vdelete(). This is wrong.
# The vgroup reference number must instead be passed.
refnum = vg._refnum
vg.detach()
_checkErr('delete', _C.Vdelete(self._hdf_inst._id, refnum),
"error deleting vgroup")
def getid(self, ref):
"""Obtain the reference number of the vgroup following the
vgroup with the given reference number.
Args::
ref reference number of the vgroup after which to search;
set to -1 to start the search at the start of
the HDF file
Returns::
reference number of the vgroup past the one identified by 'ref'
An exception is raised if the last vgroup has already been reached.
C library equivalent : Vgetid
"""
num = _C.Vgetid(self._hdf_inst._id, ref)
_checkErr('getid', num, "bad arguments or last vgroup reached")
return num
class VG(object):
"""The VG class encapsulates the functionnality of a vgroup.
To instantiate a VG class, call the attach() or create() methods
of a V class instance."""
def __init__(self, vinst, id):
# This constructor is not intended to be called directly
# by the user program. The attach() method of an
# V class instance should be called instead.
# Arg:
# vinst V instance from which the call is made
# id vgroup identifier
# Private attributes:
# _v_inst V instance to which the vgroup belongs
# _id vgroup identifier
self._v_inst = vinst
self._id = id
def __del__(self):
"""Delete the instance, first calling the detach() method
if not already done. """
try:
if self._id:
self.detach()
except:
pass
def __getattr__(self, name):
"""Some vgroup properties can be queried/set through the following
attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes. Most are read-only.
Only the _class and _name attributes can be modified.
Name RO Description C library routine
----- -- ----------------- -----------------
_class class name Vgetclass/Vsetclass
_name name Vgetname/Vsetname
_nattrs X number of attributes Vnattrs
_nmembers X number of vgroup members Vntagrefs
_refnum X reference number VQueryref
_tag X tag VQuerytag
_version X version number Vgetversion
"""
# NOTE: python will call this method only if the attribute
# is not found in the object dictionary.
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute
if name == "_class":
status, nm = _C.Vgetclass(self._id)
_checkErr('_class', status, 'cannot get vgroup class')
return nm
elif name == "_name":
status, nm = _C.Vgetname(self._id)
_checkErr('_name', status, 'cannot get vgroup name')
return nm
elif name == "_nattrs":
n = _C.Vnattrs(self._id)
_checkErr('_nattrs', n, 'cannot get number of attributes')
return n
elif name == "_nmembers":
n = _C.Vntagrefs(self._id)
_checkErr('_nmembers', n, 'cannot get vgroup number of members')
return n
elif name == "_refnum":
n = _C.VQueryref(self._id)
_checkErr('refnum', n, 'cannot get vgroup reference number')
return n
elif name == "_tag":
n = _C.VQuerytag(self._id)
_checkErr('_tag', n, 'cannot get vgroup tag')
return n
elif name == "_version":
n = _C.Vgetversion(self._id)
_checkErr('_version', n, 'cannot get vgroup version')
return n
else:
raise AttributeError
def __setattr__(self, name, value):
# A name starting with an underscore will be treated as
# a standard python attribute, and as an HDF attribute
# otherwise.
# Forbid assigning to readonly attributes
if name in ["_nattrs", "_nmembers", "_refnum", "_tag", "_version"]:
raise AttributeError("%s: read-only attribute" % name)
# Read-write predefined attributes
elif name == "_class":
_checkErr(name, _C.Vsetclass(self._id, value),
'cannot set _class property')
elif name == "_name":
_checkErr(name, _C.Vsetname(self._id, value),
'cannot set _name property')
# Try to set a user-defined attribute.
else:
_setattr(self, name, value)
def insert(self, inst):
"""Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
"""
if isinstance(inst, VD):
id = inst._id
elif isinstance(inst, VG):
id = inst._id
else:
raise HDF4Error("insrt: bad argument")
index = _C.Vinsert(self._id, id)
_checkErr('insert', index, "cannot insert in vgroup")
return index
def add(self, tag, ref):
"""Add to the vgroup an object identified by its tag and
reference number.
Args::
tag tag of the object to add
ref reference number of the object to add
Returns::
total number of objects in the vgroup after the addition
C library equivalent : Vaddtagref
"""
n = _C.Vaddtagref(self._id, tag, ref)
_checkErr('addtagref', n, 'invalid arguments')
return n
def delete(self, tag, ref):
"""Delete from the vgroup the member identified by its tag
and reference number.
Args::
tag tag of the member to delete
ref reference number of the member to delete
Returns::
None
Only the link of the member with the vgroup is deleted.
The member object is not deleted.
C library equivalent : Vdeletetagref
"""
_checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
"error deleting member")
def detach(self):
"""Terminate access to the vgroup.
Args::
no argument
Returns::
None
C library equivalent : Vdetach
"""
_checkErr('detach', _C.Vdetach(self._id),
"cannot detach vgroup")
self._id = None
def tagref(self, index):
"""Get the tag and reference number of a vgroup member,
given the index number of that member.
Args::
index member index (0 based)
Returns::
2-element tuple:
- member tag
- member reference number
C library equivalent : Vgettagref
"""
status, tag, ref = _C.Vgettagref(self._id, index)
_checkErr('tagref', status, "illegal arguments")
return tag, ref
def tagrefs(self):
"""Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs
"""
n = self._nmembers
ret = []
if n:
tags = _C.array_int32(n)
refs = _C.array_int32(n)
k = _C.Vgettagrefs(self._id, tags, refs, n)
_checkErr('tagrefs', k, "error getting tags and refs")
for m in xrange(k):
ret.append((tags[m], refs[m]))
return ret
def inqtagref(self, tag, ref):
"""Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref
"""
return _C.Vinqtagref(self._id, tag, ref)
def nrefs(self, tag):
"""Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs
"""
n = _C.Vnrefs(self._id, tag)
_checkErr('nrefs', n, "bad arguments")
return n
def isvg(self, ref):
"""Determines if the member of a vgoup is a vgroup.
Args::
ref reference number of the member to check
Returns::
False (0) if the member is not a vgroup
True (1) otherwise
C library equivalent : Visvg
"""
return _C.Visvg(self._id, ref)
def isvs(self, ref):
"""Determines if the member of a vgoup is a vdata.
Args::
ref reference number of the member to check
Returns::
False (0) if the member is not a vdata,
True (1) otherwise
C library equivalent : Visvs
"""
return _C.Visvs(self._id, ref)
def attr(self, name_index):
"""Create a VGAttr instance representing a vgroup attribute.
Args::
name_index attribute name or attribute index number; if a
name is given the attribute may not exist; in that
case, it will be created when the VGAttr
instance set() method is called
Returns::
VGAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VGAttr(self, name_index)
def attrinfo(self):
"""Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
def findattr(self, name):
"""Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
class VGAttr(object):
"""The VGAttr class encapsulates methods used to set and query
attributes defined on a vgroup. To create an instance of this class,
call the attr() method of a VG class. """
def __init__(self, obj, name_or_index):
# This constructor should not be called directly by the user
# program. The attr() method of a VG class must be called to
# instantiate this class.
# Args:
# obj VG instance to which the attribute belongs
# name_or_index name or index of the attribute; if a name is
# given, an attribute with that name will be
# searched, if not found, a new index number will
# be generated
# Private attributes:
# _v_inst V instance
# _index attribute index or None
# _name attribute name or None
self._v_inst = obj
# Name is given. Attribute may exist or not.
if isinstance(name_or_index, type('')):
self._name = name_or_index
self._index = _C.Vfindattr(self._v_inst._id, self._name)
if self._index < 0:
self._index = None
# Index is given. Attribute must exist.
else:
self._index = name_or_index
status, self._name, data_type, n_values, size = \
_C.Vattrinfo(self._v_inst._id, self._index)
_checkErr('attr', status, 'non-existent attribute')
def get(self):
"""Retrieve the attribute value.
Args::
no argument
Returns::
attribute value(s); a list is returned if the attribute
is made up of more than one value, except in the case of a
string-valued attribute (data type HC.CHAR8) where the
values are returned as a string
Note that a vgroup attribute can also be queried like a standard
python class attribute by applying the usual "dot notation" to a
VG instance.
C library equivalent : Vgetattr
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
# Obtain attribute type and the number of values.
status, aName, data_type, n_values, size = \
_C.Vattrinfo(self._v_inst._id, self._index)
_checkErr('get', status, 'illegal parameters')
# Get attribute value.
convert = _array_to_ret
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("get: attribute index %d has an "\
"illegal or unupported type %d" % \
(self._index, data_type))
status = _C.Vgetattr(self._v_inst._id, self._index, buf)
_checkErr('get', status, 'illegal attribute ')
return convert(buf, n_values)
def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
Note that a vgroup attribute can also be set like a standard
python class attribute by applying the usual "dot notation" to a
VG instance.
C library equivalent : Vsetattr
"""
try:
n_values = len(values)
except TypeError:   # 'values' was passed as a scalar
values = [values]
n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.Vsetattr(self._v_inst._id, self._name, data_type,
n_values, buf)
_checkErr('set', status, 'cannot execute')
# Update the attribute index
self._index = _C.Vfindattr(self._v_inst._id, self._name)
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index")
def info(self):
"""Retrieve info about the attribute.
Args::
no argument
Returns::
4-element tuple with the following components:
-attribute name
-attribute data type (one of HC.xxx constants)
-attribute order (number of values)
-attribute size in bytes
C library equivalent : Vattrinfo
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
status, name, type, order, size = \
_C.Vattrinfo(self._v_inst._id, self._index)
_checkErr('info', status, "execution error")
return name, type, order, size
###########################
# Support functions
###########################
def _setattr(obj, name, value):
# Called by the __setattr__ method of the VG object.
#
# obj instance on which the attribute is set
# name attribute name
# value attribute value
# Treat a name starting with an underscore as that of a
# standard python instance attribute.
if name[0] == '_':
obj.__dict__[name] = value
return
# Treat everything else as an HDF attribute.
if type(value) not in [list, tuple]:
value = [value]
typeList = []
for v in value:
t = type(v)
# Prohibit mixing numeric types and strings.
if t in [int, float] and \
not bytes in typeList:
if t not in typeList:
typeList.append(t)
# Prohibit sequence of strings or a mix of numbers and string.
elif t == bytes and not typeList:
typeList.append(t)
else:
typeList = []
break
if bytes in typeList:
xtype = HC.CHAR8
value = value[0]
# double is "stronger" than int
elif float in typeList:
xtype = HC.FLOAT64
elif int in typeList:
xtype = HC.INT32
else:
raise HDF4Error("Illegal attribute value")
# Assign value
try:
a = obj.attr(name)
a.set(xtype, value)
except HDF4Error as msg:
raise HDF4Error("cannot set attribute: %s" % msg)
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
for i in xrange(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
# Strip NULL at end
if chrs[-1] == '\0':
del chrs[-1]
return ''.join(chrs)
| {
"repo_name": "ryfeus/lambda-packs",
"path": "HDF4_H5_NETCDF/source2.7/pyhdf/V.py",
"copies": "1",
"size": "53727",
"license": "mit",
"hash": 1570582760324417000,
"line_mean": 33.0475285171,
"line_max": 76,
"alpha_frac": 0.5838405271,
"autogenerated": false,
"ratio": 4.261004044729955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003652637654826052,
"num_lines": 1578
} |
# $Id: vrrp.py 23 2006-11-08 15:45:33Z dugsong $
"""Virtual Router Redundancy Protocol."""
import dpkt
class VRRP(dpkt.Packet):
__hdr__ = (
('vtype', 'B', 0x21),
('vrid', 'B', 0),
('priority', 'B', 0),
('count', 'B', 0),
('atype', 'B', 0),
('advtime', 'B', 0),
('sum', 'H', 0),
)
addrs = ()
auth = ''
def _get_v(self):
return self.vtype >> 4
def _set_v(self, v):
# keep the low nibble (type) and replace the high nibble (version)
self.vtype = (self.vtype & 0xf) | (v << 4)
v = property(_get_v, _set_v)
def _get_type(self):
return self.vtype & 0xf
def _set_type(self, v):
# keep the high nibble (version) and replace the low nibble (type)
self.vtype = (self.vtype & 0xf0) | (v & 0xf)
type = property(_get_type, _set_type)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
for off in range(0, 4 * self.count, 4):
l.append(self.data[off:off+4])
self.addrs = l
# index from the fixed address block size; 'off' is undefined when count == 0
self.auth = self.data[4 * self.count:]
self.data = ''
def __len__(self):
return self.__hdr_len__ + (4 * self.count) + len(self.auth)
def __str__(self):
data = ''.join(self.addrs) + self.auth
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
| {
"repo_name": "ashumeow/pcaphar",
"path": "src/third_party/dpkt/dpkt/vrrp.py",
"copies": "11",
"size": "1266",
"license": "apache-2.0",
"hash": -5168097775377628000,
"line_mean": 25.9361702128,
"line_max": 67,
"alpha_frac": 0.4763033175,
"autogenerated": false,
"ratio": 2.910344827586207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8886648145086207,
"avg_score": null,
"num_lines": null
} |
# $Id: vrrp.py 88 2013-03-05 19:43:17Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Virtual Router Redundancy Protocol."""
import dpkt
from decorators import deprecated
class VRRP(dpkt.Packet):
__hdr__ = (
('_v_type', 'B', 0x21),
('vrid', 'B', 0),
('priority', 'B', 0),
('count', 'B', 0),
('atype', 'B', 0),
('advtime', 'B', 0),
('sum', 'H', 0),
)
addrs = ()
auth = ''
@property
def v(self): # high 4 bits of _v_type
return self._v_type >> 4
@v.setter
def v(self, v):
self._v_type = (self._v_type & 0x0f) | (v << 4)
@property
def type(self): # low 4 bits of _v_type
return self._v_type & 0x0f
@type.setter
def type(self, v):
self._v_type = (self._v_type & 0xf0) | (v & 0x0f)
# Deprecated methods, will be removed in the future
# =================================================
@deprecated('v')
def _get_v(self): return self.v
@deprecated('v')
def _set_v(self, v): self.v = v
@deprecated('type')
def _get_type(self): return self.type
@deprecated('type')
def _set_type(self, v): self.type = v
# =================================================
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
off = 0
for off in range(0, 4 * self.count, 4):
l.append(self.data[off:off + 4])
self.addrs = l
# index from the address block size so the slice is right even when count == 0
self.auth = self.data[4 * self.count:]
self.data = ''
def __len__(self):
return self.__hdr_len__ + (4 * self.count) + len(self.auth)
def __str__(self):
data = ''.join(self.addrs) + self.auth
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
def test_vrrp():
# no addresses
s = '\x00\x00\x00\x00\x00\x00\xff\xff'
v = VRRP(s)
assert v.sum == 0xffff
assert str(v) == s
# have address
s = '\x21\x01\x64\x01\x00\x01\xba\x52\xc0\xa8\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00'
v = VRRP(s)
assert v.count == 1
assert v.addrs == ['\xc0\xa8\x00\x01'] # 192.168.0.1
assert str(v) == s
# test checksum generation
v.sum = 0
assert str(v) == s
# test length
assert len(v) == len(s)
# test getters
assert v.v == 2
assert v.type == 1
# test setters
v.v = 3
v.type = 2
assert str(v)[0] == '\x32'
if __name__ == '__main__':
test_vrrp()
print 'Tests Successful...'
| {
"repo_name": "jack8daniels2/dpkt",
"path": "dpkt/vrrp.py",
"copies": "6",
"size": "2520",
"license": "bsd-3-clause",
"hash": 6537049532573989000,
"line_mean": 22.5514018692,
"line_max": 90,
"alpha_frac": 0.4916666667,
"autogenerated": false,
"ratio": 2.9577464788732395,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007257539967820342,
"num_lines": 107
} |
# $Id: vrrp.py 88 2013-03-05 19:43:17Z andrewflnr@gmail.com $
"""Virtual Router Redundancy Protocol."""
import dpkt
class VRRP(dpkt.Packet):
__hdr__ = (
('vtype', 'B', 0x21),
('vrid', 'B', 0),
('priority', 'B', 0),
('count', 'B', 0),
('atype', 'B', 0),
('advtime', 'B', 0),
('sum', 'H', 0),
)
addrs = ()
auth = ''
def _get_v(self):
return self.vtype >> 4
def _set_v(self, v):
self.vtype = (self.vtype & 0xf) | (v << 4)  # replace only the high nibble
v = property(_get_v, _set_v)
def _get_type(self):
return self.vtype & 0xf
def _set_type(self, v):
self.vtype = (self.vtype & 0xf0) | (v & 0xf)  # replace only the low nibble
type = property(_get_type, _set_type)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
for off in range(0, 4 * self.count, 4):
l.append(self.data[off:off+4])
self.addrs = l
# index from the fixed address block size; 'off' is undefined when count == 0
self.auth = self.data[4 * self.count:]
self.data = ''
def __len__(self):
return self.__hdr_len__ + (4 * self.count) + len(self.auth)
def __str__(self):
data = ''.join(self.addrs) + self.auth
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
| {
"repo_name": "somethingnew2-0/CS642-HW2",
"path": "dpkt/vrrp.py",
"copies": "2",
"size": "1285",
"license": "mit",
"hash": -4080898741693309000,
"line_mean": 26.3404255319,
"line_max": 67,
"alpha_frac": 0.4824902724,
"autogenerated": false,
"ratio": 2.9204545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9357430403191196,
"avg_score": 0.009102882932670166,
"num_lines": 47
} |
# $Id: VS.py,v 1.4 2005-07-14 01:36:41 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.3 2004/08/02 17:06:20 gosselin
# pyhdf-0.7.2
#
# Revision 1.2 2004/08/02 15:36:04 gosselin
# pyhdf-0.7-1
#
# Author: Andre Gosselin
# Maurice-Lamontagne Institute
# gosselina@dfo-mpo.gc.ca
"""
VS (Vdata table) API (:mod:`pyhdf.VS`)
======================================
A module of the pyhdf package implementing the VS (Vdata table)
API of the NCSA HDF4 library.
Introduction
------------
VS is one of the modules composing pyhdf, a python package implementing
the NCSA HDF library and letting one manage HDF files from within a python
program. Two versions of the HDF library currently exist, version 4 and
version 5. pyhdf only implements version 4 of the library. Many
different APIs are to be found inside the HDF4 specification.
Currently, pyhdf implements just a few of those: the SD, VS and V APIs.
Other APIs should be added in the future (GR, AN, etc).
VS allows the definition of structured data tables inside an HDF file.
Those tables are designated as "vdatas" (the name has to do with data
associated with the "vertices" of geometrical models, the storage of which
the API was originally designed for). A vdata is composed of a fixed
number of columns (also called fields), where a column can store a fixed
number of data values, all of the same type. The number of values allowed
inside a field is called the "order" of the field. A table is composed of a
varying number of rows (also called records), a record representing the
sequence of values stored in each field of the vdata.
A vdata is associated with a descriptive name, and likewise each field of
the vdata. A vdata can also be tagged with a "class" to further describe the
vdata purpose. Records and fields are identified by a zero-based index.
An arbitrary number of attributes of different types can be attached to
a vdata as a whole, or to its individual fields. An attribute is a
(name, value) pair, where "value" can be of many types, and be either
single or multi-valued. The number of values stored in an attribute is
called the "order" of the attribute.
The following example illustrates a simple vdata that could be stored
inside an HDF file. See section "Programming models" for an example
program implementing this vdata.
INVENTORY (experimental status)
====== =========== === ======== ========
partid description qty wght(lb) price($)
====== =========== === ======== ========
Q1234 bolt 12 0.01 0.05
B5432 brush 10 0.4 4.25
S7613 scissor 2 0.2 3.75
====== =========== === ======== ========
The vdata is composed of 5 fields. 3 records are shown (of course, a vdata
can store much more than that). "INVENTORY" would be the vdata name, and
"partid", "description", etc, would be the field names. The data type varies
between fields. "partid" and "description" would be of "multicharacter" type
(aka "string"), "qty" would be a integer, and "wght" and "price" would be
floats. The text in parentheses could be stored as attributes. A "status"
attribute could be defined for the table as a whole, and given the
value "experimental". Likewise, a "unit" attribute could be associated
with fields "wght" and "price", and given the values "lb" and "$", resp.
The VS API allows one to create, locate and open a vdata inside an
HDF file, update and append records inside it, read records randomly
or sequentially, and access and update the vdata and field attributes.
Attributes can be read and written using the familiar python "dot
notation", and records can be read and written by indexing and slicing the
vdata as if it were a python sequence.
VS module key features
----------------------
VS key features are as follows.
- pyhdf implements almost every routine of the original VS API.
Only a few have been ignored, most of them because they are rarely used:
- VSgetblocksize() / VSsetblocksize()
- VSsetnumblocks()
- VSlone
- It is quite straightforward to go from a C version to a python version
of a program accessing the VS API, and to learn VS usage by referring to
the C API documentation.
- A few high-level python methods have been developed to ease the
programmer's task. Of greatest interest are the following:
- Access to attributes through the familiar "dot notation".
- Indexing and slicing a vdata to read and write its records,
similarly to a python sequence.
- Easy retrieval of info on a vdata and its fields.
- Easy creation of vdatas.
Accessing the VS module
-----------------------
To access the VS module a python program can say one of:
>>> import pyhdf.VS # must prefix names with "pyhdf.VS."
>>> from pyhdf import VS # must prefix names with "VS."
>>> from pyhdf.VS import * # names need no prefix
This document assumes the last import style is used.
VS is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
>>> from pyhdf.HDF import *
Package components
------------------
pyhdf is a proper Python package, i.e. a collection of modules stored under
a directory whose name is that of the package and which stores an
__init__.py file. Following the normal installation procedure, this
directory will be <python-lib>/site-packages/pyhdf, where <python-lib>
stands for the python installation directory.
For each HDF API there exists a corresponding set of modules.
The following modules are related to the VS API.
_hdfext
C extension module responsible for wrapping the HDF
C library for all python modules
hdfext
python module implementing some utility functions
complementing the _hdfext extension module
error
defines the HDF4Error exception
HDF
python module providing support to the VS module
VS
python module wrapping the VS API routines inside
an OOP framework
_hdfext and hdfext were generated using the SWIG preprocessor.
SWIG is however *not* needed to run the package. Those two modules
are meant to do their work in the background, and should never be called
directly. Only HDF and VS should be imported by the user program.
Prerequisites
-------------
The following software must be installed in order for VS to
work.
HDF (v4) library
pyhdf does *not* include the HDF4 library, which must
be installed separately.
HDF is available at:
"https://portal.hdfgroup.org/display/support/Download+HDF4".
Numeric is also needed by the SD module. See the SD module documentation.
Documentation
-------------
pyhdf has been written so as to stick as closely as possible to
the naming conventions and calling sequences documented inside the
"HDF User s Guide" manual. Even if pyhdf gives an OOP twist
to the C API, the manual can be easily used as a documentary source
for pyhdf, once the class to which a function belongs has been
identified, and of course once requirements imposed by the Python
language have been taken into account. Consequently, this documentation
will not attempt to provide an exhaustive coverage of the HDF VS
API. For this, the user is referred to the above manual.
The documentation of each pyhdf method will indicate the name
of the equivalent routine as it is found inside the C API.
This document (in both its text and html versions) has been completely
produced using "pydoc", the Python documentation generator (which
made its debut in the 2.1 Python release). pydoc can also be used
as an on-line help tool. For example, to know everything about
the VS.VD class, say:
>>> from pydoc import help
>>> from pyhdf.VS import *
>>> help(VD)
To be more specific and get help only for the read() method of the
VD class:
>>> help(VD.read)
pydoc can also be called from the command line, as in::
% pydoc pyhdf.VS.VD # doc for the whole VD class
% pydoc pyhdf.VS.VD.read # doc for the VD.read method
Summary of differences between the pyhdf and C VS API
-----------------------------------------------------
Most of the differences between the pyhdf and C VS API can
be summarized as follows.
- In the C API, every function returns an integer status code, and values
computed by the function are returned through one or more pointers
passed as arguments.
- In pyhdf, error statuses are returned through the Python exception
mechanism, and values are returned as the method result. When the
C API specifies that multiple values are returned, pyhdf returns a
sequence of values, which are ordered similarly to the pointers in the
C function argument list.
Error handling
--------------
All errors reported by the C VS API with a SUCCESS/FAIL error code
are reported by pyhdf using the Python exception mechanism.
When the C library reports a FAIL status, pyhdf raises an HDF4Error
exception (a subclass of Exception) with a descriptive message.
Unfortunately, the C library is rarely informative about the cause of
the error. pyhdf does its best to try to document the error, but most
of the time cannot do more than saying "execution error".
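For example, attaching a non-existent vdata raises HDF4Error (a minimal
sketch; 'badname' is a hypothetical vdata name assumed absent from the
file, and 'vs' is a VS instance obtained as shown in the next section):
>>> try:
...     vd = vs.attach('badname')      # attach a non-existent vdata
... except HDF4Error as msg:
...     print("attach failed:", msg)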
VS needs support from the HDF module
------------------------------------
The VS module is not self-contained (contrary to the SD module).
It requires help from the HDF module, namely:
- the HDF.HDF class to open and close the HDF file, and initialize the
VS interface
- the HDF.HC class to provide different sorts of constants (opening modes,
data types, etc).
A program wanting to access HDF vdatas will almost always need to execute
the following minimal set of calls:
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> hdfFile = HDF(name, HC.xxx)  # open HDF file
>>> vs = hdfFile.vstart() # initialize VS interface on HDF file
>>> ... # manipulate vdatas through "vs"
>>> vs.end() # terminate VS interface
>>> hdfFile.close() # close HDF file
Classes summary
---------------
pyhdf wraps the VS API using different python classes::
VS HDF VS interface
VD vdata
VDField vdata field
VDattr attribute (either at the vdata or field level)
In more detail::
VS The VS class implements the VS (Vdata) interface applied to an
HDF file. This class encapsulates the hdf instance, and all
the top-level functions of the VS API.
To create a VS instance, call the vstart() method of an
HDF instance.
methods:
constructors:
attach() open an existing vdata given its name or
reference number, or create a new one,
returning a VD instance
create() create a new vdata and define its structure,
returning a VD instance
creating and initializing a simple vdata
storedata() create a single-field vdata and initialize
its values
closing the interface
end() close the VS interface on the HDF file
searching
find() get a vdata reference number given its name
next() get the reference number of the vdata following
a given one
inquiry
vdatainfo() return info about all the vdatas in the
HDF file
VD The VD class describes a vdata. It encapsulates
the VS instance to which the vdata belongs, and the vdata
identifier.
To instantiate a VD class, call the attach() or create()
method of a VS class instance.
methods:
constructors
attr() create a VDAttr instance representing a
vdata attribute; "dot notation" can also be
used to access a vdata attribute
field() return a VDField instance representing a given
field of the vdata
closing vdata
detach() end access to the vdata
defining fields
fdefine() define the name, type and order of a new field
setfields() define the field names and field order for
the read() and write() methods; also used to
initialize the structure of a vdata previously
created with the VS.attach() method
reading and writing
note: a vdata can be indexed and sliced like a
python sequence
read() return the values of a number of records
starting at the current record position
seek() reset the current record position
seekend() seek past the last record
tell() return the current record position
write() write a number of records starting at the
current record position
inquiry
attrinfo() return info about all the vdata attributes
fexist() check if a vdata contains a given set of fields
fieldinfo() return info about all the vdata fields
findattr() locate an attribute, returning a VDAttr instance
if found
inquire() return info about the vdata
sizeof() return the size in bytes of one or more fields
VDField The VDField class represents a vdata field. It encapsulates
the VD instance to which the field belongs, and the field
index number.
To instantiate a VDField, call the field() method of a VD class
instance.
methods:
constructors:
attr() return a VDAttr instance representing an
attribute of the field; "dot notation"
can also be used to get/set an attribute.
inquiry
attrinfo() return info about all the field attributes
find() locate an attribute, returning a VDAttr
instance if found
VDAttr The VDAttr class encapsulates methods used to set and query
attributes defined at the level either of the vdata or the
vdata field.
To create an instance of this class, call the attr() or
findattr() methods of a VD instance (for vdata attributes),
or call the attr() or find() methods of a VDField instance
(for field attributes).
methods:
get / set
get() get the attribute value
set() set the attribute value
info
info() retrieve info about the attribute
Data types
----------
Data types come into play when first defining vdata fields and attributes,
and later when querying the definition of those fields and attributes.
Data types are specified using the symbolic constants defined inside the
HC class of the HDF module.
- CHAR and CHAR8 (equivalent): an 8-bit character.
- UCHAR, UCHAR8 and UINT8 (equivalent): unsigned 8-bit values (0 to 255)
- INT8: signed 8-bit values (-128 to 127)
- INT16: signed 16-bit values
- UINT16: unsigned 16 bit values
- INT32: signed 32 bit values
- UINT32: unsigned 32 bit values
- FLOAT32: 32 bit floating point values (C floats)
- FLOAT64: 64 bit floating point values (C doubles)
There is no explicit "string" type. To simulate a string, set the field or
attribute type to CHAR, and set the field or attribute "order" to
a value of 'n' > 1. This creates an "array of characters", close
to a string (except that the string will always be of length 'n', right-padded
with spaces if necessary).
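For example, a field holding strings of at most 10 characters could be
declared as follows (a minimal sketch, assuming a VS instance 'vs'; the
vdata and field names are arbitrary):
>>> vd = vs.create('NAMES', (('name', HC.CHAR8, 10),))
>>> vd[0] = ('Joe',)             # stored as a 10-character field value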
Attribute access: low and high level
------------------------------------
The VS API allows setting attributes on vdatas and vdata fields. Attributes
can be of many types (int, float, char) of different bit lengths (8, 16, 32,
64 bits), and can be single or multi-valued. Values of a multi-valued
attribute must all be of the same type.
Attributes can be set and queried in two different ways. First, given a
VD instance (describing a vdata object) or a VDField instance (describing a
vdata field), the attr() method of that instance is called to create a
VDAttr instance representing the wanted attribute (possibly non existent).
The set() method of this VDAttr instance is then called to define the
attribute value, creating it if it does not already exist. The get() method
returns the current attribute value. Here is an example.
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> vs = f.vstart() # init vdata interface
>>> vd = vs.attach('vtest', 1) # attach vdata 'vtest' in write mode
>>> attr = vd.attr('version') # prepare to define the 'version' attribute
# on the vdata
>>> attr.set(HC.CHAR8,'1.0') # set attribute 'version' to string '1.0'
>>> print(attr.get()) # get and print attribute value
>>> fld = vd.field('fld1') # obtain a field instance for field 'fld1'
>>> attr = fld.attr('range') # prepare to define attribute 'range' on
# this field
>>> attr.set(HC.INT32,(-10, 15)) # set attribute 'range' to a pair of ints
>>> print(attr.get()) # get and print attribute value
>>> vd.detach() # "close" the vdata
>>> vs.end() # terminate the vdata interface
>>> f.close() # close the HDF file
The second way consists of setting/querying an attribute as if it were a
normal python class attribute, using the usual dot notation. Above example
then becomes:
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> vs = f.vstart() # init vdata interface
>>> vd = vs.attach('vtest', 1) # attach vdata 'vtest' in write mode
>>> vd.version = '1.0' # create vdata attribute 'version',
# setting it to string '1.0'
>>> print(vd.version) # print attribute value
>>> fld = vd.field('fld1') # obtain a field instance for field 'fld1'
>>> fld.range = (-10, 15) # create field attribute 'range', setting
# it to the pair of ints (-10, 15)
>>> print(fld.range) # print attribute value
>>> vd.detach() # "close" the vdata
>>> vs.end() # terminate the vdata interface
>>> f.close() # close the HDF file
Note how the dot notation greatly simplifies and clarifies the code.
Some latitude is however lost by manipulating attributes in that way,
because the pyhdf package, not the programmer, is then responsible for
setting the attribute type. The attribute type is chosen to be one of:
=========== ====================================
HC.CHAR8 if the attribute value is a string
HC.INT32 if all attribute values are integers
HC.FLOAT64 otherwise
=========== ====================================
The first way of handling attribute values must be used if one wants to
define an attribute of any other type (for ex. 8 or 16 bit integers,
signed or unsigned). Also, only a VDAttr instance gives access to attribute
info, through its info() method.
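For example (a minimal sketch, assuming a vdata instance 'vd'; the
attribute names are arbitrary):
>>> vd.scale = 2                 # dot notation: stored as HC.INT32
>>> attr = vd.attr('scale16')    # prepare attribute 'scale16'
>>> attr.set(HC.INT16, 2)        # explicitly stored as a 16-bit integer
>>> print(attr.info())           # name, type, order and size of the attribute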
However, accessing HDF attributes as if they were python attributes raises
an important issue. There must exist a way to assign generic attributes
to the python objects without requiring those attributes to be converted
to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
the object dictionary: the python dir() function will not list them.
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
for attributes holding string values. An attribute initialized with an
'n' character string is simply a character attribute of order 'n' (i.e. a
character array of length 'n'). If 'vd' is a vdata and we initialize its
'a1' attribute as 'vd.a1 = "abcdef"', then a subsequent update attempt
like 'vd.a1 = "12"' will fail, because we then try to change the order
of the attribute (from 6 to 2). It is mandatory to keep the length of string
attributes constant. The examples below show simple ways to do this.
Predefined attributes
---------------------
The VD and VDField classes support predefined attributes to get (and
occasionally set) attribute values easily, without having to call a
class method. The names of predefined attributes all start with an
underscore ('_').
In the following tables, the RW column holds an X if the attribute
is read/write. See the HDF User's Guide for details about more
"exotic" topics like "class", "faked vdata" and "tag".
VD predefined attributes
=========== == ========================== =============================
name RW description C library routine
=========== == ========================== =============================
_class X class name VSgetclass/VSsetclass
_fields list of field names VSgetfields
_interlace X interlace mode VSgetinterlace/VSsetinterlace
_isattr true if vdata is "faked" VSisattr
by HDF to hold attributes
_name X name of the vdata VSgetname/VSsetname
_nattrs number of attributes VSfnattrs
_nfields number of fields VFnfields
_nrecs number of records VSelts
_recsize record size (bytes) VSQueryvsize
_refnum reference number VSQueryref
_tag vdata tag VSQuerytag
_tnattrs total number of vdata and VSnattrs
field attributes
=========== == ========================== =============================
VDField predefined attributes
=========== == ========================== =============================
name RW description C library routine
=========== == ========================== =============================
_esize external size (bytes) VFfieldesize
_index index number VSfindex
_isize internal size (bytes) VFfieldisize
_name name VFfieldname
_nattrs number of attributes VSfnattrs
_order order (number of values) VFfieldorder
_type field type (HC.xxx) VFfieldtype
=========== == ========================== =============================
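For example, basic info about a vdata 'vd' and its first field can be
obtained through predefined attributes as follows (a minimal sketch):
>>> print(vd._name, vd._nrecs, vd._nfields)  # name, nb of records, nb of fields
>>> fld = vd.field(0)
>>> print(fld._name, fld._type, fld._order)  # field name, type and order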
Record access: low and high level
---------------------------------
vdata records can be read and written in two different ways. The first one
consists of calling the basic I/O methods of the vdata:
- seek() to set the current record position, if necessary;
- read() to retrieve a given number of records from that position;
- write() to write a given number of records starting at
that position
A second, higher level way, lets one see a vdata similarly to a python
sequence, and access its contents using the familiar indexing and slicing
notation in square brackets. Reading and writing a vdata as if it were a
python sequence may often look simpler, and improve code legibility.
Here are some examples of how a vdata 'vd' holding 3 fields could be read.
>>> print(vd[0]) # print record 0
>>> print(vd[-1]) # print last record
>>> print(vd[2:]) # print records 2 and those that follow
>>> print(vd[:]) # print all records
>>> print(vd[:,0]) # print field 0 of all records
>>> print(vd[:3,:2]) # print first 2 fields of first 3 records
As the above examples show, the usual python rules are obeyed regarding
the interpretation of indexing and slicing values. Note that the vdata
fields can be indexed and sliced, not only the records. The setfields()
method can also be used to select a subset of the vdata fields
(setfields() also lets you reorder the fields). When the vdata is
indexed (as opposed to being sliced), a single record is returned as a list
of values. When the vdata is sliced, a list of records is
always returned (thus a 2-level list), even if the slice contains only
one record.
A vdata can also be written similarly to a python sequence. When indexing
the vdata (as opposed to slicing it), a single record must be assigned,
and the record must be given as a sequence of values. It is legal to use
as an index the current number of records in the vdata: the record is then
appended to the vdata. When slicing the vdata, the records assigned to the
slice must always be given as a list of records, even
if only one record is assigned. Also, the number of records assigned must
always match the width of the slice, except if the slice includes or goes
past the last record of the vdata. In that case, the number of records
assigned can exceed the width of the slice, and the extra records are
appended to the vdata. So, to append records to vdata 'vd', simply
assign records to the slice 'vd[vd._nrecs:]'. Note that, even if the
'field' dimension can be specified in the left-hand side expression,
there is no real interest in doing so, since all fields must
be specified when assigning a record to the vdata: it is an error to
try to assign just a few of the fields.
For example, given a vdata 'vd' holding 5 records, and lists 'reca',
'recb', etc, holding record values::
vd[0] = reca # updates record 0
vd[0,:] = reca # specifying fields is OK, but useless
vd[0,1:] = reca[1:] # error: all fields must be assigned
vd[1] = [recb, recc] # error: only one record allowed
vd[5] = recc # append one record
vd[1:3] = [reca,recb] # updates second and third record
vd[1:4] = [reca, recb] # error: 3 records needed
vd[5:] = [reca,recb] # appends 2 records to the vdata
vd[4:] = [reca, recb] # updates last record, append one
Programming models
------------------
Creating and initializing a new vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following code can serve as a model for the creation and
initialization of a new vdata. It implements the INVENTORY example
described in the "Introduction" section::
from pyhdf.HDF import *
from pyhdf.VS import *
# Open HDF file and initialize the VS interface
f = HDF('inventory.hdf', # Open file 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
# Create vdata and define its structure
vd = vs.create( # create a new vdata
'INVENTORY', # name of the vdata
# fields of the vdata follow
(('partid',HC.CHAR8, 5), # 5 char string
('description',HC.CHAR8, 10), # 10 char string field
('qty',HC.INT16, 1), # 1 16 bit int field
('wght',HC.FLOAT32, 1), # 1 32 bit float
('price',HC.FLOAT32,1) # 1 32 bit float
)) # 5 fields allocated in the vdata
# Set attributes on the vdata and its fields
vd.field('wght').unit = 'lb'
vd.field('price').unit = '$'
# In order to be able to update a string attribute, it must
# always be set to the same length. This sets 'status' to a 20
# char long, left-justified string, padded with spaces on the right.
vd.status = "%-20s" % 'phase 1 done'
# Store records
vd.write(( # write 3 records
('Q1234', 'bolt',12, 0.01, 0.05), # record 1
('B5432', 'brush', 10, 0.4, 4.25), # record 2
('S7613', 'scissor', 2, 0.2, 3.75) # record 3
))
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Note that it is mandatory to always write whole records to the vdata.
Note also the comments about the initialization of the 'status'
vdata attribute. We want to be able to update this attribute (see
the following examples). However, the VS API prohibits changing an attribute
type when updating its value. Since the length (order) of an attribute
is part of its type, we make sure to set the attribute to a length
long enough to accommodate the longest possible string we might want to
assign to the attribute.
Appending records to a vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Appending records requires first seeking to the end of the vdata, to avoid
overwriting existing records. The following code can serve as a model. The
INVENTORY vdata created before is used::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list
# where number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
n = vd.attr('status').info()[2]
vd.status = '%-*s' % (n, 'phase 2 done')
vd[vd._nrecs:] = ( # append 2 records
('A4321', 'axe', 5, 1.5, 25), # first record
('C3214', 'cup', 100, 0.1, 3.25) # second record
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Note how, when updating the value of the 'status' vdata attribute,
we take care of assigning a value of the same length as that of the
original value. Otherwise, the assignment would raise an exception.
Records are written by assigning the vdata through a slicing
expression, like a python sequence. By specifying the number of records
as the start of the slice, the records are appended to the vdata.
Updating records in a vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating requires seeking to the record to update before writing the new
records. New data will overwrite this record and all records that follow,
until a new seek is performed or the vdata is closed. Note that record
numbering starts at 0.
The following code can serve as a model. The INVENTORY vdata created
before is used::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list
# where number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
n = vd.attr('status').info()[2]
vd.status = '%-*s' % (n, 'phase 3 done')
# Update record at index 1 (second record)
vd[1] = ('Z4367', 'surprise', 10, 3.1, 44.5)
# Update record at index 4, and all those that follow
vd[4:] = (
('QR231', 'toy', 12, 2.5, 45),
('R3389', 'robot', 3, 45, 2000)
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Reading a vdata
^^^^^^^^^^^^^^^
The following example shows how to read the vdata attributes and sequentially
loop through its records. Note how we use the exception mechanism
to break out of the reading loop when we reach the end of the vdata::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf') # open 'inventory.hdf' in read mode
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY') # attach 'INVENTORY' in read mode
# Display some vdata attributes
print "status:", vd.status
print "vdata: ", vd._name # predefined attribute: vdata name
print "nrecs: ", vd._nrecs # predefined attribute: num records
# Display value of attribute 'unit' for all fields on which
# this attribute is set
print "units: ",
for fieldName in vd._fields: # loop over all field names
try:
# instantiate field and obtain value of attribute 'unit'
v = vd.field(fieldName).unit
print "%s: %s" % (fieldName, v),
except: # no 'unit' attribute: ignore
pass
print ""
print ""
# Display table header.
header = "%-7s %-12s %3s %4s %8s" % tuple(vd._fields)
print "-" * len(header)
print header
print "-" * len(header)
# Loop over the vdata records, displaying each record as a table row.
# Current record position is 0 after attaching the vdata.
while 1:
try:
rec = vd.read() # read next record
# equivalent to:
# rec = vd[vd.tell()]
print "%-7s %-12s %3d %4.1f %8.2f" % tuple(rec[0])
except HDF4Error: # end of vdata reached
break
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
In the previous example, the reading/displaying loop can be greatly
simplified by rewriting it as follows::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf') # open 'inventory.hdf' in read mode
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY') # attach 'INVENTORY' in read mode
....
# Read all records at once, and loop over the sequence.
for rec in vd[:]:
print "%-7s %-12s %3d %4.1f %8.2f" % tuple(rec)
vd.detach() # "close" the vdata
...
The indexing expression 'vd[:]' returns the complete set of records,
which can then be looped over using a 'for' statement. This style of loop
is quite clean, and should look very familiar to python adepts.
"""
import os, sys, types
from . import hdfext as _C
from . import six
from .six.moves import xrange
from .HC import HC
from .error import HDF4Error, _checkErr
# List of names we want to be imported by an "from pyhdf.VS import *"
# statement
__all__ = ['VS', 'VD', 'VDField', 'VDAttr']
class VS(object):
"""The VS class implements the VS (Vdata) interface applied to an
HDF file.
To instantiate a VS class, call the vstart() method of an
HDF instance. """
def __init__(self, hinst):
# Not to be called directly by the user.
# A VS object is instantiated using the vstart()
# method of an HDF instance.
# Args:
# hinst HDF instance
# Returns:
# A VS instance
#
# C library equivalent : Vstart (rather: Vinitialize)
# Private attributes:
# _hdf_inst: HDF instance
# Note: Vstart is just a macro; use 'Vinitialize' instead
status = _C.Vinitialize(hinst._id)
_checkErr('VS', status, "cannot initialize VS interface")
self._hdf_inst = hinst
def __del__(self):
"""Delete the instance, first calling the end() method
if not already done. """
try:
if self._hdf_inst:
self.end()
except:
pass
def end(self):
"""Close the VS interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
_checkErr('end', _C.Vfinish(self._hdf_inst._id),
"cannot terminate VS interface")
self._hdf_inst = None
vend = end # For backward compatibility
def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdatas names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
"""
mode = 'w' if write else 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd)
def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg)
def find(self, vName):
"""Get the reference number of a vdata given its name.
The vdata can then be opened (attached) by passing this
reference number to the attach() method.
Args::
vName Name of the vdata for which the reference number
is needed. vdata names are not guaranteed to be
unique. When more than one vdata bears the same name,
find() will return the refnum of the first one found.
Returns::
vdata reference number. 0 is returned if the vdata does not exist.
C library equivalent : VSfind
"""
refNum = _C.VSfind(self._hdf_inst._id, vName)
_checkErr("find", refNum, "cannot find vdata %s" % vName)
return refNum
def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
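For example, the reference numbers of all vdatas in the file can be
collected as follows (a minimal sketch, assuming a VS instance 'vs')::
    refs = []
    ref = -1                     # start before the first vdata
    while True:
        try:
            ref = vs.next(ref)   # raises HDF4Error past the last vdata
        except HDF4Error:
            break
        refs.append(ref)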
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num
def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
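For example, vdata names and record counts can be listed as follows
(a minimal sketch, assuming a VS instance 'vs')::
    for info in vs.vdatainfo():
        name, cls, ref, nrecs = info[:4]
        print("%-12s ref=%d nrecs=%d" % (name, ref, nrecs))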
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst
def storedata(self, fieldName, values, data_type, vName, vClass):
"""Create and initialize a single field vdata, returning
the vdata reference number.
Args::
fieldName Name of the single field in the vdata to create
values Sequence of values to store in the field. Each value can
itself be a sequence, in which case the field will be
multivalued (all second-level sequences must be of
the same length)
data_type Values type (one of HC.xxx constants). All values
must be of the same type
vName Name of the vdata to create
vClass Vdata class (string)
Returns::
vdata reference number
C library equivalent : VHstoredata / VHstoredatam
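For example, a single-field vdata holding three float values could be
created as follows (a minimal sketch; the field, vdata and class names
are arbitrary)::
    ref = vs.storedata('temp', (1.5, 2.5, 3.5), HC.FLOAT32,
                       'TEMPERATURES', 'test data')
    vd = vs.attach(ref)          # reattach it through its refnum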
"""
# See if the field is multi-valued.
nrecs = len(values)
if type(values[0]) in [list, tuple]:
order = len(values[0])
# Replace input list with a flattened list.
newValues = []
for el in values:
for e in el:
newValues.append(e)
values = newValues
else:
order = 1
n_values = nrecs * order
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("storedata: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
if order == 1:
vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass)
else:
vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass, order)
_checkErr('storedata', vd, 'cannot create vdata')
return vd
class VD(object):
"""The VD class encapsulates the functionality of a vdata.
To instantiate a VD class, call the attach() or the create()
method of a VS class instance."""
def __init__(self, vsinst, id):
# This constructor is not intended to be called directly
# by the user program. The attach() method of an
# VS class instance should be called instead.
# Arg:
# vsinst VS instance from which the call is made
# id vdata reference number
# Private attributes:
# _vs_inst VS instance to which the vdata belongs
# _id vdata identifier
# _offset current record offset
# _setfields last arg to setfields()
self._vs_inst = vsinst
self._id = id
self._offset = 0
self._setfields = None
def __getattr__(self, name):
"""Some vdata properties can be queried/set through the following
attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes. Most are read-only.
Only the _class, _interlace and _name attributes can be modified,
and _interlace can only be set once.
Name RO Description C library routine
----- -- ----------------- -----------------
_class class name VSgetclass
_fields X field names VSgetfields
_interlace interlace mode VSgetinterlace
_isattr X attribute vs real vdata VSisattr
_name name VSgetname
_nattrs X number of attributes VSfnattrs
_nfields X number of fields VFnfields
_nrecs X number of records VSelts
_recsize X record size VSQueryvsize
_refnum X reference number VSQueryref
_tag X tag VSQuerytag
_tnattrs X total number of attr. VSnattrs
"""
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute
elif name == "_class":
status, nm = _C.VSgetclass(self._id)
_checkErr('_class', status, 'cannot get vdata class')
return nm
elif name == "_fields":
n, fields = _C.VSgetfields(self._id)
_checkErr('_fields', n, "cannot get vdata field names")
return fields.split(',')
elif name == "_interlace":
mode = _C.VSgetinterlace(self._id)
_checkErr('_interlace', mode, "cannot get vdata interlace mode")
return mode
elif name == "_isattr":
return _C.VSisattr(self._id)
elif name == "_name":
status, nm = _C.VSgetname(self._id)
_checkErr('_name', status, 'cannot get vdata name')
return nm
elif name == "_nattrs":
n = _C.VSfnattrs(self._id, -1) # -1: vdata attributes
_checkErr("_nfields", n, "cannot retrieve number of attributes")
return n
elif name == "_nfields":
n = _C.VFnfields(self._id)
_checkErr("_nfields", n, "cannot retrieve number of fields")
return n
elif name == "_nrecs":
n = _C.VSelts(self._id)
_checkErr('_nrecs', n, 'cannot get vdata number of records')
return n
elif name == "_recsize":
return self.inquire()[3]
elif name == "_refnum":
n = _C.VSQueryref(self._id)
_checkErr('_refnum', n, 'cannot get reference number')
return n
elif name == "_tag":
n = _C.VSQuerytag(self._id)
_checkErr('_tag', n, 'cannot get tag')
return n
elif name == "_tnattrs":
n = _C.VSnattrs(self._id)
_checkErr('_tnattrs', n, 'execution error')
return n
raise AttributeError(name)
def __setattr__(self, name, value):
# A name starting with an underscore will be treated as
# a standard python attribute, and as an HDF attribute
# otherwise.
# Forbid assigning to our predefined attributes
if name in ["_fields", "_isattr", "_nattrs", "_nfields",
"_nrecs", "_recsize", "_refnum", "_tag", "_tnattrs"]:
raise AttributeError("%s: read-only attribute" % name)
# Handle the 3 VS attributes: _class, _interlace
# and _name. _interlace can only be set once.
elif name == "_class":
_checkErr(name, _C.VSsetclass(self._id, value),
'cannot set _class property')
elif name == "_interlace":
_checkErr(name, _C.VSsetinterlace(self._id, value),
'cannot set _interlace property')
elif name == "_name":
_checkErr(name, _C.VSsetname(self._id, value),
'cannot set _name property')
# Try to set the attribute.
else:
_setattr(self, name, value)
def __getitem__(self, elem):
# This method is called when the vdata is read
# like a Python sequence.
# Parse the indexing expression.
start, count = self.__buildStartCount(elem)
# Reset current position if necessary.
if self._offset != start[0]:
self.seek(start[0])
# Get records. A negative count means that an index was used.
recs = self.read(abs(count[0]))
# See if all the fields must be returned.
f0 = start[1]
if f0 == 0 and count[1] == self._nfields:
out = recs
else:
# Return only a subset of the vdata fields.
out = []
f1 = f0 + count[1]
for r in recs:
out.append(r[f0:f1])
# If an index was used (not a slice), return the record as
# a list, instead of returning it inside a 2-level list.
if count[0] < 0:
return out[0]
return out
def __setitem__(self, elem, data):
# This method is called when the vdata is written
# like a Python sequence.
#
# When indexing the vdata, 'data' must specify exactly
# one record, which must be specified as a sequence. If the index is
# equal to the current number of records, the record
# is appended to the vdata.
#
# When slicing the vdata, 'data' must specify a list of records.
# The number of records in the top-level list must match the width
# of the slice, except if the slice extends past the end of the
# vdata. In that case, extra records can be specified in the list,
# which will be appended to the vdata. In other words,
# to append records to vdata 'vd', assign records to
# the slice 'vd[vd._nrecs:]'.
#
# For ex., given a vdata 'vd' holding 5 records, and lists
# 'reca', 'recb', etc holding record values:
# vd[0] = reca # updates record 0
# vd[1] = [recb, recc] # error: only one record allowed
# vd[1:3] = [reca,recb] # updates second and third record
# vd[1:4] = [reca, recb] # error: 3 records needed
# vd[5:] = [reca,recb] # appends 2 records to the vdata
# Check that arg is a list.
if not type(data) in [tuple, list]:
raise HDF4Error("record(s) must be specified as a list")
start, count = self.__buildStartCount(elem, setitem=1)
# Records cannot be partially written.
if start[1] != 0 or count[1] != self._nfields:
raise HDF4Error("each vdata field must be written")
# If an index (as opposed to a slice) was applied to the
# vdata, a single record must be passed. Since write() requires
# a 2-level list, wrap this record inside a list.
if count[0] < 0:
if len(data) != self._nfields:
raise HDF4Error("record does not specify all fields")
data = [data]
# A slice was used. The slice length must match the number of
# records, except if the end of the slice equals the number
# of records. Then, extra records can be specified, which will
# be appended to the vdata.
else:
if count[0] != len(data):
if start[0] + count[0] != self._nrecs:
raise HDF4Error("illegal number of records")
# Reset current record position if necessary.
if self._offset != start[0]:
self.seek(start[0])
# Write records.
self.write(data)
def __del__(self):
"""Delete the instance, first calling the detach() method
if not already done. """
try:
if self._id:
self.detach()
except:
pass
def detach(self):
"""Terminate access to the vdata.
Args::
no argument
Returns::
None
C library equivalent : VSdetach
"""
_checkErr('detach', _C.VSdetach(self._id), "cannot detach vdata")
self._id = None
def fdefine(self, name, type, order):
"""Define a field. To initialize a newly created vdata with
fields created with fdefine(), assign a tuple of field names
to the _fields attribute or call the setfields() method.
Args::
name field name
type field data type (one of HC.xxx)
order field order (number of values in the field)
Returns::
None
C library equivalent : VSfdefine
"""
_checkErr('fdefine', _C.VSfdefine(self._id, name, type, order),
'cannot define field')
def setfields(self, *fldNames):
"""Define the name and order of the fields to access
with the read() and write() methods.
Args::
fldNames variable length argument specifying one or more
vdata field names
Returns::
None
C library equivalent : VSsetfields
setfields() indicates how to perform the matching between the vdata
fields and the values passed to the write() method or returned
by the read() method.
For example, if the vdata contains fields 'a', 'b' and 'c' and
a "setfields('c','a')" call is made, read() will thereafter return
for each record the values of field 'c' and 'a', in that order.
Field 'b' will be ignored.
When writing to a vdata, setfields() has a second usage. It is used
to initialize the structure of the vdata, that is, the name and order
of the fields that it will contain. The fields must have been
previously defined by calls to the fdefine() method.
Following that first call, setfields() can be called again to
change the order in which the record values will be passed
to the write() method. However, since it is mandatory to write
whole records, subsequent calls to setfields() must specify every
field name: only the field order can be changed.
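For example, given a vdata holding fields 'a', 'b' and 'c' (a minimal
sketch)::
    vd.setfields('c', 'a')       # select fields 'c' and 'a', in that order
    recs = vd.read(vd._nrecs)    # each record is a [c_value, a_value] pair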
"""
_checkErr('setfields', _C.VSsetfields(self._id, ','.join(fldNames)),
'cannot execute')
self._setfields = fldNames # remember for read/write routines
def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index)
def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend()
else:
raise HDF4Error("attempt to seek past last record")
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n
def seekend(self):
"""Set the current record position past the last vdata record.
Subsequent write() calls will append records to the vdata.
Args::
no argument
Returns::
index of the last record plus 1
C library equivalent : no equivalent
"""
try:
# Seek to the next-to-last record position
n = self.seek(self._nrecs - 1) # updates _offset
# Read last record, ignoring values
self.read(1) # updates _offset
return self._nrecs
except HDF4Error:
raise HDF4Error("seekend: cannot execute")
def tell(self):
"""Return current record position in the vdata.
Args::
no argument
Returns::
current record position; 0 is at start of vdata.
C library equivalent : no equivalent
"""
return self._offset
def read(self, nRec=1):
"""Retrieve the values of a number of records, starting
at the current record position. The current record position
is advanced by the number of records read. Current position
is 0 after "opening" the vdata with the attach() method.
Args::
nRec number of records to read
Returns::
2-level list. First level is a sequence of records,
second level gives the sequence of values for each record.
The values returned for each record are those of the fields
specified in the last call to method setfields(), in that
order. The complete vdata field set is returned if
setfields() has not been called.
An exception is raised if the current record position is
already at the end of the vdata when read() is called. This
exception can be caught as an "end of vdata" indication to
exit a loop which scans each record of the vdata. Otherwise,
the number of records to be read is lowered to the number of
records remaining in the vdata, if that number is less than
the number asked for by parameter 'nRec'. Setting 'nRec' to
an arbitrarily large value can thus be used to retrieve the
remaining records in the vdata.
C library equivalent : VSread
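For example, all the records remaining from the current position can be
fetched with a deliberately large request (a minimal sketch)::
    vd.seek(0)                   # rewind to the first record
    recs = vd.read(vd._nrecs)    # request is clipped to what is available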
"""
# Validate number of records to read vs the current offset.
# Return "end of vdata" exception if already at end of vdata
# otherwise "clip" the number of records if it exceeds the
# number of remaining records in the vdata.
n = self._nrecs
if self._offset == n:
raise HDF4Error("end of vdata reached")
if self._offset + nRec > n:
nRec = n - self._offset
fields = self._setfields or self._fields
nFields = len(fields)
fieldList = ','.join(fields)
_checkErr('read', _C.VSsetfields(self._id, fieldList),
'error defining fields to read')
# Allocate a buffer to store the packed records.
bufSize = self.sizeof(fields) * nRec
bigBuf = _C.array_byte(bufSize)
# Read records
nRead = _C.VSread(self._id, bigBuf, nRec, 0) # 0: FULL_INTERLACE
_checkErr('read', nRead, 'read error')
self._offset += nRec
# Allocate an array to store a pointer to the field buffer.
fldArr = _C.new_array_voidp(1)
# Initialize return value
values = []
for numRec in range(nRead):
v = []
for numFld in range(nFields):
v.append(None)
values.append(v)
# Unpack each field in turn.
for numFld in range(nFields):
fld = self.field(fields[numFld])
data_type = fld._type
order = fld._order
n_values = order * nRead
# Allocate a buffer to store the field values.
if data_type in [HC.CHAR8, HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("read: illegal or unupported type %d" % \
data_type)
# Unpack the field values.
_C.array_voidp_setitem(fldArr, 0, buf)
_checkErr('read',
_C.VSfpack(self._id, 1, fieldList, bigBuf, bufSize,
nRead, fld._name, fldArr),
"cannot execute")
# Extract values from the field buffer.
k = 0
for numRec in range(nRead):
if order == 1:
values[numRec][numFld] = buf[k]
k += 1
else:
# Handle strings specially
if data_type == HC.CHAR8:
s = ''
for i in range(order):
v = buf[k]
if v != 0:
s += chr(v)
k += 1
values[numRec][numFld] = s
# Return field values as a list
else:
values[numRec][numFld] = []
for i in range(order):
values[numRec][numFld].append(buf[k])
k += 1
del buf
return values
def write(self, values):
"""Write records to the vdata. Writing starts at the current
record position, which is advanced by the number of records
written.
Args::
values: 2-level sequence. The first level is a sequence of records.
The second level gives the sequence of values for each record.
It is mandatory to always write whole records. Thus
every record field must appear at the second level.
The record values are ordered according to the list of
field names set in the last call to the setfields()
method. The order of the complete vdata field set is
used if setfields() has not been called.
Returns::
number of records written
To append to a vdata already holding 'n' records, it is necessary
to first move the current record position to 'n-1' with a call to
method seek(), then to call method read() for the side effect
of advancing the current record position past this last record.
Method seekend() does just that.
C library equivalent : VSwrite
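For example, two whole records of a hypothetical 3-field vdata could be
appended as follows (a minimal sketch; the record values are arbitrary)::
    vd.seekend()                 # move past the last record
    vd.write([(1, 2.5, 'abc'),   # each record specifies every field
              (2, 3.5, 'def')])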
"""
nFields = self._nfields
# Fields give the order of the record values, as defined in the
# last call to setfields()
fields = self._setfields or self._fields
# We must pack values using the effective field order in the vdata
fieldList = ','.join(self._fields)
# Validate the values argument.
if nFields != len(fields):
raise HDF4Error("write: must write whole records")
if type(values) not in [list, tuple]:
raise HDF4Error("write: values must be a sequence")
nRec = len(values)
for n in range(nRec):
rec = values[n]
if type(rec) not in [list, tuple]:
raise HDF4Error("write: records must be given as sequences")
# Make sure each record is complete.
if len(rec) != nFields:
raise HDF4Error("write: records must specify every field")
# Allocate a buffer to store the packed records.
bufSize = self._recsize * nRec
bigBuf = _C.array_byte(bufSize)
# Allocate an array to store a pointer to the field buffer.
fldArr = _C.new_array_voidp(1)
# Pack each field in turn.
for numFld in range(nFields):
fld = self.field(fields[numFld])
data_type = fld._type
order = fld._order
n_values = order * nRec
# Allocate a buffer to store the field values.
if data_type in [HC.CHAR8, HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("write: illegal or unupported type %d" % \
data_type)
# Load the field buffer with values.
k = 0
for numRec in range(nRec):
val = values[numRec][numFld]
# Single-valued field
if order == 1:
buf[k] = val
k += 1
# Multi-valued field
else:
# Handle strings specially.
if data_type == HC.CHAR8:
if not isinstance(val, str):
raise HDF4Error("char fields must be set with strings")
n = len(val)
for i in range(order):
buf[k] = ord(val[i]) if i < n else 0
k += 1
# Should validate field elements ...
elif type(val) not in [list, tuple]:
raise HDF4Error("multi-values fields must be given as sequences")
else:
for i in range(order):
buf[k] = val[i]
k += 1
# Store address of the field buffer in first position
# of the field array. Pack the field values.
_C.array_voidp_setitem(fldArr, 0, buf) # fldArr[0] = buf
_checkErr('write',
_C.VSfpack(self._id, 0, fieldList, bigBuf, bufSize,
nRec, fld._name, fldArr),
"cannot execute")
del buf
# Write the packed records.
n = _C.VSwrite(self._id, bigBuf, nRec, 0) # 0: FULL_INTERLACE
_checkErr('write', n, 'cannot execute')
self._offset += nRec
return n
def inquire(self):
"""Retrieve info about the vdata.
Args::
no argument
Returns::
5-element tuple with the following elements:
-number of records in the vdata
-interlace mode
-list of vdata field names
-size in bytes of the vdata record
-name of the vdata
C library equivalent : VSinquire
"""
status, nRecs, interlace, fldNames, size, vName = \
_C.VSinquire(self._id)
_checkErr('inquire', status, "cannot query vdata info")
return nRecs, interlace, fldNames.split(','), size, vName
def fieldinfo(self):
"""Retrieve info about all vdata fields.
Args::
no argument
Returns::
list where each element describes a field of the vdata;
each field is described by an 7-element tuple containing
the following elements:
- field name
- field data type (one of HC.xxx constants)
- field order
- number of attributes attached to the field
- field index number
- field external size
- field internal size
C library equivalent : no equivalent
"""
lst = []
for n in range(self._nfields):
fld = self.field(n)
lst.append((fld._name,
fld._type,
fld._order,
fld._nattrs,
fld._index,
fld._esize,
fld._isize))
return lst
def sizeof(self, fields):
"""Retrieve the size in bytes of the given fields.
Args::
fields sequence of field names to query
Returns::
total size of the fields in bytes
C library equivalent : VSsizeof
"""
if type(fields) in [tuple, list]:
    fldList = ','.join(fields)
else:
    fldList = fields
n = _C.VSsizeof(self._id, fldList)
_checkErr('sizeof', n, "cannot retrieve field sizes")
return n
def fexist(self, fields):
"""Check if a vdata contains a given set of fields.
Args::
fields sequence of field names whose presence in the
vdata must be checked
Returns::
true (1) if the given fields are present
false (0) otherwise
C library equivalent : VSfexist
"""
if type(fields) in [tuple, list]:
    fldList = ','.join(fields)
else:
    fldList = fields
ret = _C.VSfexist(self._id, fldList)
if ret < 0:
return 0
else:
return 1
def attr(self, name_or_index):
"""Create a VDAttr instance representing a vdata attribute.
Args::
name_or_index attribute name or index number; if a name is
given, the attribute may not exist; in that
case, it will be created when the VDAttr
instance set() method is called
Returns::
VDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VDAttr(self, name_or_index, -1) # -1: vdata attribute
def findattr(self, name):
"""Search the vdata for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
def attrinfo(self):
"""Return info about all the vdata attributes.
Args::
no argument
Returns::
dictionary describing each vdata attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
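For example, all vdata attributes can be displayed as follows (a minimal
sketch)::
    for name, (atype, order, value, size) in vd.attrinfo().items():
        print(name, "=", value)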
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
def __buildStartCount(self, elem, setitem=0):
# Called by __getitem__() and __setitem__() methods
# to parse the expression used inside square brackets to
# index/slice a vdata.
# If 'setitem' is set, the call comes from __setitem__()
# We then allow the start value to be past the last record
# so as to be able to append to the vdata.
#
# Return a 2-element tuple:
# - tuple of the start indices along the vdata dimensions
# - tuple of the count values along the vdata dimensions
# a count of -1 indicates that an index, not a slice
was applied on the corresponding dimension.
# Make sure the indexing expression does not exceed the
# vdata number of dimensions (2).
if isinstance(elem, tuple):
if len(elem) > 2:
raise HDF4Error("illegal indexing expression")
else: # Convert single index to sequence
elem = [elem]
start = []
count = []
shape = [self._nrecs, self._nfields]
n = -1
for e in elem:
n += 1
# Simple index
if isinstance(e, int):
is_slice = False
if e < 0:
e += shape[n]
if e < 0 or e >= shape[n]:
if e == shape[n] and setitem:
pass
else:
raise HDF4Error("index out of range")
beg = e
end = e + 1
# Slice index
elif isinstance(e, slice):
is_slice = True
# None or 0 means not specified
if e.start:
beg = e.start
if beg < 0:
beg += shape[n]
else:
beg = 0
# None or maxint means not specified
if e.stop and e.stop != sys.maxsize:
end = e.stop
if end < 0:
end += shape[n]
else:
end = shape[n]
# Neither an int nor a slice: invalid indexing expression.
else:
raise ValueError("invalid indexing expression")
# Clip end index and compute number of elements to get
if end > shape[n]:
end = shape[n]
if beg > end:
beg = end
if is_slice:
cnt = end - beg
else:
cnt = -1
start.append(beg)
count.append(cnt)
if n == 0:
start.append(0)
count.append(shape[1])
return start, count
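# Illustrative sketch, not part of the original module: the slicing
# forms parsed by VD.__buildStartCount() above, as seen through the
# public indexing interface. 'vd' is assumed to be an attached VD
# instance with at least 5 records.
def _vd_indexing_examples(vd):
    rec = vd[2]         # start (2, 0), count (-1, nfields): one full record
    recs = vd[1:4]      # start (1, 0), count (3, nfields): records 1..3
    cell = vd[0, 1]     # start (0, 1), count (-1, -1): record 0, field 1
    cols = vd[:, 0:2]   # start (0, 0), count (nrecs, 2): first two fields
    return rec, recs, cell, cols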
class VDField(object):
"""The VDField class represents a vdata field.
To create a VDField instance, call the field() method of a
VD class instance. """
def __init__(self, vdinst, fIndex):
# This method should not be called directly by the user program.
# To create a VDField instance, obtain a VD class instance and
# call its field() method.
# Args:
# vdinst VD instance to which the field belongs
# fIndex field index
#
# Private attributes:
# _vd_inst VD instance to which the field belongs
# _idx field index
self._vd_inst = vdinst
self._idx = fIndex
def __getattr__(self, name):
"""Some field properties can be queried through the following
read-only attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes.
Name Description C library routine
----- ------------------- -----------------
_esize field external size VFfieldesize
_index field index number VSfindex
_isize field internal size VFfieldisize
_name field name VFfieldname
_nattrs number of attributes VSfnattrs
_order field order VFfieldorder
_type field type VFfieldtype
"""
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute.
elif name == "_esize":
n = _C.VFfieldesize(self._vd_inst._id, self._idx)
_checkErr('_esize', n, "execution error")
return n
elif name == "_index":
return self._idx
elif name == "_isize":
n = _C.VFfieldisize(self._vd_inst._id, self._idx)
_checkErr('_isize', n, "execution error")
return n
elif name == "_name":
n = _C.VFfieldname(self._vd_inst._id, self._idx)
_checkErr('_name', n, "execution error")
return n
elif name == "_nattrs":
n = _C.VSfnattrs(self._vd_inst._id, self._idx)
_checkErr('_nattrs', n, "execution error")
return n
elif name == "_order":
n = _C.VFfieldorder(self._vd_inst._id, self._idx)
_checkErr('_order', n, "execution error")
return n
elif name == "_type":
type = _C.VFfieldtype(self._vd_inst._id, self._idx)
_checkErr('_type', type, 'cannot retrieve field type')
return type
raise AttributeError
def __setattr__(self, name, value):
# Forbid assigning to our predefined attributes
if name in ["_esize", "_index", "_isize", "_name",
"_nattrs", "_order", "_type"]:
raise AttributeError("%s: read-only attribute" % name)
# Try to set the attribute.
else:
_setattr(self, name, value)
def attr(self, name_or_index):
"""Create a VDAttr instance representing a field attribute.
Args::
name_or_index attribute name or index number; if a name is
specified, the attribute may not exist; in that
case, it will be created when the VDAttr
instance set() method is called; if an
index number is specified, the attribute
must exist
Returns::
VDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VDAttr(self, name_or_index, self._idx)
def find(self, name):
"""Search the field for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
def attrinfo(self):
"""Return info about all the field attributes.
Args::
no argument
Returns::
dictionary describing each vdata attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
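# Illustrative sketch, not part of the original module: reading the
# predefined read-only attributes resolved by VDField.__getattr__()
# above. 'vd' is assumed to be an attached VD instance.
def _vdfield_info_example(vd):
    fld = vd.field(0)
    return {'name': fld._name,        # VFfieldname
            'type': fld._type,        # VFfieldtype, an HC.xxx constant
            'order': fld._order,      # VFfieldorder
            'esize': fld._esize,      # external (file) size in bytes
            'isize': fld._isize,      # internal (memory) size in bytes
            'nattrs': fld._nattrs}    # VSfnattrs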
class VDAttr(object):
"""The VDAttr class encapsulates methods used to set and query attributes
defined at the level either of the vdata or of the vdata field.
To create an instance of this class, call the attr() method of a VD
(vdata) or VDField (vdata field) instance. """
def __init__(self, obj, name_or_index, fIndex):
# This constructor should not be called directly by the user
# program. The attr() method of a VD (vdata) or VDField
# (vdata field) must be called to instantiate this class.
# Args:
# obj object instance (VD or VDField) to which the
# attribute belongs
# name_or_index name or index of the attribute; if a name is
# given, an attribute with that name will be
# searched, if not found, a new index number will
# be generated
# fIndex field index, or -1 if the attribute belongs
# to the vdata
# Private attributes:
# _vd_inst VD instance
# _vdf_inst VDField instance or None
# _index attribute index or None
# _name attribute name or None
# _fIndex field index, or -1 if obj is a VD instance
if isinstance(obj, VD):
self._vd_inst = obj
self._vdf_inst = None
self._fIndex = -1
else:
self._vd_inst = obj._vd_inst
self._vdf_inst = obj
self._fIndex = fIndex
# Name is given. Attribute may exist or not.
if isinstance(name_or_index, type('')):
self._name = name_or_index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
self._name);
if self._index < 0:
self._index = None
# Index is given. The attribute must exist.
else:
self._index = name_or_index
status, self._name, data_type, n_values, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex,
self._index)
_checkErr('attr', status, 'non-existent attribute')
def get(self):
"""Retrieve the attribute value.
Args::
no argument
Returns::
attribute value(s); a list is returned if the attribute
is made up of more than one value, except in the case of a
string-valued attribute (data type HC.CHAR8) where the
values are returned as a string
C library equivalent : VSgetattr
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
# Obtain attribute type and the number of values.
status, aName, data_type, n_values, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex,
self._index)
_checkErr('get', status, 'illegal parameters')
# Get attribute value.
convert = _array_to_ret
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("get: attribute index %d has an "\
"illegal or unupported type %d" % \
(self._index, data_type))
status = _C.VSgetattr(self._vd_inst._id, self._fIndex,
self._index, buf)
_checkErr('get', status, 'illegal attribute ')
return convert(buf, n_values)
def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
C library equivalent : VSsetattr
"""
try:
n_values = len(values)
except:
values = [values]
n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
if not isinstance(values[n], int):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.VSsetattr(self._vd_inst._id, self._fIndex, self._name,
data_type, n_values, buf)
_checkErr('set', status, 'cannot execute')
# Update the attribute index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
self._name);
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index")
def info(self):
"""Retrieve info about the attribute.
Args::
no argument
Returns::
4-element tuple with the following components:
-attribute name
-attribute data type (one of HC.xxx constants)
-attribute order (number of values)
-attribute size in bytes
C library equivalent : VSattrinfo
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
status, name, type, order, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex, self._index)
_checkErr('info', status, "execution error")
return name, type, order, size
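# Illustrative sketch, not part of the original module: a create /
# read-back round trip through VDAttr.set(), get() and info().
# 'vd' is assumed to be a VD instance opened in write mode.
def _vdattr_roundtrip_example(vd):
    att = vd.attr('units')          # the attribute may not exist yet
    att.set(HC.CHAR8, 'meters')     # creates (or updates) it
    name, data_type, order, size = att.info()
    return att.get()                # -> 'meters'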
###########################
# Support functions
###########################
def _setattr(obj, name, value):
# Called by the __setattr__ method of the VD and VDField objects.
#
# obj instance on which the attribute is set
# name attribute name
# value attribute value
if isinstance(value, six.string_types):
value = value.encode('utf8')
# Treat a name starting with an underscore as that of a
# standard python instance attribute.
if name[0] == '_':
obj.__dict__[name] = value
return
# Treat everything else as an HDF attribute.
if type(value) not in [list, tuple]:
value = [value]
typeList = []
for v in value:
t = type(v)
# Prohibit mixing numeric types and strings.
if t in [int, float] and \
not bytes in typeList:
if t not in typeList:
typeList.append(t)
# Prohibit sequence of strings or a mix of numbers and string.
elif t == bytes and not typeList:
typeList.append(t)
else:
typeList = []
break
if bytes in typeList:
xtype = HC.CHAR8
value = value[0]
# double is "stronger" than int
elif float in typeList:
xtype = HC.FLOAT64
elif int in typeList:
xtype = HC.INT32
else:
raise HDF4Error("Illegal attribute value")
# Assign value
try:
a = obj.attr(name)
a.set(xtype, value)
except HDF4Error as msg:
raise HDF4Error("cannot set attribute: %s" % msg)
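# Illustrative sketch, not part of the original module: the HDF types
# that _setattr() above infers for a few Python values. 'vd' is
# assumed to be a VD instance opened in write mode.
def _setattr_type_examples(vd):
    vd.purpose = 'calibration run'    # string          -> HC.CHAR8
    vd.gain = 2.5                     # float           -> HC.FLOAT64
    vd.channels = [1, 2, 3]           # ints            -> HC.INT32, order 3
    vd.scale = [1, 2.0]               # mixed int/float -> HC.FLOAT64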
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
for i in xrange(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
# Strip NULL at end
if chrs[-1] == '\0':
del chrs[-1]
return ''.join(chrs)
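# Illustrative sketch, not part of the original module: the conversion
# rules implemented by the two helpers above.
def _array_helper_examples():
    buf = _C.array_int32(3)
    buf[0], buf[1], buf[2] = 1, 2, 3
    many = _array_to_ret(buf, 3)      # -> [1, 2, 3]
    one = _array_to_ret(buf, 1)       # -> 1 (a scalar, not a list)
    txt = _C.array_byte(3)
    txt[0], txt[1], txt[2] = ord('h'), ord('i'), 0
    s = _array_to_str(txt, 3)         # -> 'hi' (trailing NUL stripped)
    return many, one, s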
| {
"repo_name": "fhs/pyhdf",
"path": "pyhdf/VS.py",
"copies": "1",
"size": "95695",
"license": "mit",
"hash": -1736249271370498600,
"line_mean": 35.7069428462,
"line_max": 89,
"alpha_frac": 0.5589424735,
"autogenerated": false,
"ratio": 4.324806797125683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5383749270625683,
"avg_score": null,
"num_lines": null
} |
"""
VTK inside Blender module.
This module provides code so that polydata from vtk can
be used inside of blender.
Python needs to find the vtk stuff and this module in order
for this to work, and you can either a) set your PYTHONPATH
in your environment, or you can b) hardcode your vtk path's
in your script, e.g.,
a) at the prompt, before starting blender, type:
PYTHONPATH=$VTK_ROOT/Wrapping/Python:${LIBRARY_OUTPUT_PATH}
PYTHONPATH=$PYTHONPATH:${PATH_TO_THIS_MODULE}
export PYTHONPATH
b) add the following to your script near the beginning, before
importing vtk or VTKBlender:
import sys
sys.path.append($VTK_ROOT/Wrapping/Python)
sys.path.append(${LIBRARY_OUTPUT_PATH})
sys.path.append(${PATH_TO_VTKBlender_MODULE})
Be sure to replace $VTK_ROOT and ${LIBRARY_OUTPUT_PATH} with
values that are relevant to your system. These values can be
found by starting vtkpython with no arguments and typing:
import sys
print sys.path
Usually the first two items reported are the ones you want.
Also replace ${PATH_TO_VTKBlender_MODULE} with wherever you have
put the VTKBlender module.
"""
import vtk
import time, string
try:
import Blender
from Blender import Mesh, Object, Material
except:
print "No Blender module found!"
__versiontag__ = "$Revision: 1.19 $"
__version__ = string.split(__versiontag__)[1]
# some flags to alter behavior
flags = 0
TRIS_TO_QUADS = 0x01
SMOOTH_FACES = 0x02
# What is this 'tri to quad' stuff? Well, sometimes it's best to
# try to read in pairs of consecutive triangles in as quad faces.
# An example: you extrude a tube along a polyline in vtk, and if
# you can get it into Blender as a bunch of quads, you can use a
# Catmull-Clark subdivision surface to smooth the tube out, with
# fewer creases.
def SetTrisToQuads():
global flags
flags = flags | TRIS_TO_QUADS
def SetTrisToTris():
global flags
flags = flags & ~TRIS_TO_QUADS
def SetFacesToSmooth():
global flags
flags = flags | SMOOTH_FACES
def SetFacesToFaceted():
global flags
flags = flags & ~SMOOTH_FACES
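# Illustrative sketch, not part of the original module: typical flag
# setup before a conversion. Consecutive triangle pairs are merged
# into quads and the resulting faces are smooth-shaded.
def _example_flag_setup():
    SetTrisToQuads()
    SetFacesToSmooth()
    # ...convert some data, then restore the defaults:
    SetTrisToTris()
    SetFacesToFaceted()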
def BlenderToPolyData(me, uvlayer=None):
pcoords = vtk.vtkFloatArray()
pcoords.SetNumberOfComponents(3)
pcoords.SetNumberOfTuples(len(me.verts))
for i in range(len(me.verts)):
p0 = me.verts[i].co[0]
p1 = me.verts[i].co[1]
p2 = me.verts[i].co[2]
pcoords.SetTuple3(i, p0, p1, p2)
points = vtk.vtkPoints()
points.SetData(pcoords)
polys = vtk.vtkCellArray()
lines = vtk.vtkCellArray()
for face in me.faces:
if len(face.v) == 4:
polys.InsertNextCell(4)
polys.InsertCellPoint(face.v[0].index)
polys.InsertCellPoint(face.v[1].index)
polys.InsertCellPoint(face.v[2].index)
polys.InsertCellPoint(face.v[3].index)
elif len(face.v) == 3:
polys.InsertNextCell(3)
polys.InsertCellPoint(face.v[0].index)
polys.InsertCellPoint(face.v[1].index)
polys.InsertCellPoint(face.v[2].index)
elif len(face.v) == 2:
lines.InsertNextCell(2)
lines.InsertCellPoint(face.v[0].index)
lines.InsertCellPoint(face.v[1].index)
for edge in me.edges:
lines.InsertNextCell(2)
lines.InsertCellPoint(edge.v1.index)
lines.InsertCellPoint(edge.v2.index)
pdata =vtk.vtkPolyData()
pdata.SetPoints(points)
pdata.SetPolys(polys)
pdata.SetLines(lines)
if me.faceUV:
if uvlayer:
uvnames = me.getUVLayerNames()
if uvlayer in uvnames:
me.activeUVLayer = uvlayer
tcoords = vtk.vtkFloatArray()
tcoords.SetNumberOfComponents(2)
tcoords.SetNumberOfTuples(len(me.verts))
for face in me.faces:
for i in range(len(face.verts)):
uv = face.uv[i]
tcoords.SetTuple2(face.v[i].index, uv[0], uv[1])
pdata.GetPointData().SetTCoords(tcoords);
pdata.Update()
return pdata
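# Illustrative sketch, not part of the original module: convert a
# Blender mesh to vtkPolyData and write it to disk. 'me' is assumed
# to be a Blender Mesh (2.4x API, as used above).
def _example_export_mesh(me, filename="mesh.vtk"):
    pdata = BlenderToPolyData(me)
    writer = vtk.vtkPolyDataWriter()
    writer.SetInput(pdata)
    writer.SetFileName(filename)
    writer.Write()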
def PolyDataMapperToBlender(pmapper, me=None):
global flags
faces = []
edges = []
oldmats = None
newmesh = 0
if (me == None):
me = Mesh.New()
newmesh = 1
else:
if me.materials:
oldmats = me.materials
me.verts = None # this kills the faces/edges too
pmapper.Update()
pdata = pmapper.GetInput()
plut = pmapper.GetLookupTable()
#print pdata.GetNumberOfCells()
scalars = pdata.GetPointData().GetScalars()
verts = []
for i in range(pdata.GetNumberOfPoints()):
point = pdata.GetPoint(i)
verts.append([point[0],point[1],point[2]])
me.verts.extend(verts)
# I think we can free some memory by killing the reference
# from vert to the list it points at (not sure though)
verts = []
colors = None
if ( (scalars != None) and (plut != None) ):
colors = []
# Have to be a bit careful since VTK 5.0 changed the
# prototype of vtkLookupTable.GetColor()
try:
# VTK 5.x
scolor = [0,0,0]
for i in range(scalars.GetNumberOfTuples()):
plut.GetColor(scalars.GetTuple1(i), scolor)
color = map(VTKToBlenderColor, scolor)
alpha = int(plut.GetOpacity(scalars.GetTuple1(i))*255)
colors.append([color[0], color[1], color[2], alpha])
except:
# VTK 4.x
for i in range(scalars.GetNumberOfTuples()):
color = map(VTKToBlenderColor, \
plut.GetColor(scalars.GetTuple1(i)))
alpha = int(plut.GetOpacity(scalars.GetTuple1(i))*255)
colors.append([color[0], color[1], color[2], alpha])
skiptriangle = False
for i in range(pdata.GetNumberOfCells()):
cell = pdata.GetCell(i)
#print i, pdata.GetCellType(i)
# Do lines
if pdata.GetCellType(i)==3:
n1 = cell.GetPointId(0)
n2 = cell.GetPointId(1)
BlenderAddEdge(me, edges, n1, n2)
# Do poly lines
if pdata.GetCellType(i)==4:
for j in range(cell.GetNumberOfPoints()-1):
n1 = cell.GetPointId(j)
n2 = cell.GetPointId(j+1)
BlenderAddEdge(me, edges, n1, n2)
# Do triangles
if pdata.GetCellType(i)==5:
if skiptriangle==True:
skiptriangle = False
elif ( (flags & TRIS_TO_QUADS) and
(i < pdata.GetNumberOfCells()-1) and
(pdata.GetCellType(i+1)==5) ):
n1 = cell.GetPointId(0)
n2 = cell.GetPointId(1)
n3 = cell.GetPointId(2)
nextcell = pdata.GetCell(i+1)
m1 = nextcell.GetPointId(0)
m2 = nextcell.GetPointId(1)
m3 = nextcell.GetPointId(2)
if ( (n2 == m3) and (n3 == m2) ):
BlenderAddFace(me, faces, n1, n2, m1, n3)
skiptriangle = True
else:
BlenderAddFace(me, faces, n1, n2, n3)
else:
n1 = cell.GetPointId(0)
n2 = cell.GetPointId(1)
n3 = cell.GetPointId(2)
BlenderAddFace(me, faces, n1, n2, n3)
# Do triangle strips
if pdata.GetCellType(i)==6:
numpoints = cell.GetNumberOfPoints()
if ( (flags & TRIS_TO_QUADS) and (numpoints % 2 == 0) ):
for j in range(cell.GetNumberOfPoints()-3):
if (j % 2 == 0):
n1 = cell.GetPointId(j)
n2 = cell.GetPointId(j+1)
n3 = cell.GetPointId(j+2)
n4 = cell.GetPointId(j+3)
BlenderAddFace(me, faces, n1, n2, n4, n3)
else:
for j in range(cell.GetNumberOfPoints()-2):
if (j % 2 == 0):
n1 = cell.GetPointId(j)
n2 = cell.GetPointId(j+1)
n3 = cell.GetPointId(j+2)
else:
n1 = cell.GetPointId(j)
n2 = cell.GetPointId(j+2)
n3 = cell.GetPointId(j+1)
BlenderAddFace(me, faces, n1, n2, n3)
# Do polygon
if pdata.GetCellType(i)==7:
# Add a vert at the center of the polygon,
# and break into triangles
x = 0.0
y = 0.0
z = 0.0
scal = 0.0
N = cell.GetNumberOfPoints()
for j in range(N):
point = pdata.GetPoint(cell.GetPointId(j))
x = x + point[0]
y = y + point[1]
z = z + point[2]
if (scalars != None):
scal = scal + scalars.GetTuple1(cell.GetPointId(j))
x = x / N
y = y / N
z = z / N
scal = scal / N
newidx = len(me.verts)
me.verts.extend(x,y,z)
if (scalars != None):
try:
# VTK 5.x
scolor = [0,0,0]
plut.GetColor(scal, scolor)
color = map(VTKToBlenderColor, scolor)
except:
color = map(VTKToBlenderColor, plut.GetColor(scal))
alpha = int(plut.GetOpacity(scalars.GetTuple1(i))*255)
colors.append([color[0], color[1], color[2], alpha])
# Add triangles connecting polygon sides to new vert
for j in range(N):
n1 = cell.GetPointId(j)
n2 = cell.GetPointId( (j+1) % N )
n3 = newidx
BlenderAddFace(me, faces, n1, n2, n3)
# Do pixel
if pdata.GetCellType(i)==8:
n1 = cell.GetPointId(0)
n2 = cell.GetPointId(1)
n3 = cell.GetPointId(2)
n4 = cell.GetPointId(3)
BlenderAddFace(me, faces, n1, n2, n3, n4)
# Do quad
if pdata.GetCellType(i)==9:
n1 = cell.GetPointId(0)
n2 = cell.GetPointId(1)
n3 = cell.GetPointId(2)
n4 = cell.GetPointId(3)
BlenderAddFace(me, faces, n1, n2, n3, n4)
if len(edges) > 0:
me.edges.extend(edges)
if len(faces) > 0:
me.faces.extend(faces)
if ( flags & SMOOTH_FACES):
for f in me.faces:
f.smooth = 1
# Some faces in me.faces may have been discarded from our
# list, so best to compute the vertex colors after the faces
# have been added to the mesh
if (colors != None):
me.vertexColors = 1
for f in me.faces:
f_col = []
for v in f.v:
f_col.append(colors[v.index])
SetVColors(f.col, f_col)
if not me.materials:
if oldmats:
me.materials = oldmats
else:
newmat = Material.New()
if (colors != None):
newmat.mode |= Material.Modes.VCOL_PAINT
me.materials = [newmat]
if (newmesh==0):
me.update()
return me
def VTKToBlenderColor(x):
return int(255*float(x)+0.5)
def BlenderAddFace(me, faces, n1, n2, n3, n4=None):
if (n4 != None):
faces.append([me.verts[n1], me.verts[n2], \
me.verts[n3], me.verts[n4]])
else:
faces.append([me.verts[n1], me.verts[n2], me.verts[n3]])
def BlenderAddEdge(me, edges, n1, n2):
edges.append([me.verts[n1], me.verts[n2]])
def SetVColors(col, vcols):
for j in range(len(col)):
col[j].r = vcols[j][0]
col[j].g = vcols[j][1]
col[j].b = vcols[j][2]
if len(vcols[j]) == 3:
col[j].a = 255
else:
col[j].a = vcols[j][3]
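# Illustrative sketch, not part of the original module: bring a VTK
# sphere into the current Blender scene. Assumes the Blender 2.4x
# API used above.
def _example_import_sphere():
    sphere = vtk.vtkSphereSource()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(sphere.GetOutput())
    me = PolyDataMapperToBlender(mapper)
    sc = Blender.Scene.GetCurrent()
    sc.objects.new(me, "sphere")
    Blender.Redraw()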
| {
"repo_name": "unterweg/peanoclaw",
"path": "blender/VTKBlender.py",
"copies": "2",
"size": "11458",
"license": "bsd-3-clause",
"hash": 4189661370885415000,
"line_mean": 26.1516587678,
"line_max": 78,
"alpha_frac": 0.668528539,
"autogenerated": false,
"ratio": 2.762295081967213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4430823620967213,
"avg_score": null,
"num_lines": null
} |
# $Id: vtpWRT.py 2401 2006-12-20 20:29:15Z cpbotha $
from module_base import ModuleBase
from module_mixins import FilenameViewModuleMixin
import module_utils
import types
from modules.viewers.slice3dVWRmodules.selectedPoints import outputSelectedPoints
class points_reader(FilenameViewModuleMixin, ModuleBase):
def __init__(self, module_manager):
# call parent constructor
ModuleBase.__init__(self, module_manager)
# ctor for this specific mixin
FilenameViewModuleMixin.__init__(
self,
'Select a filename',
'DeVIDE points (*.dvp)|*.dvp|All files (*)|*',
{'Module (self)': self},
fileOpen=True)
# set up some defaults
self._config.filename = ''
self._output_points = None
self.sync_module_logic_with_config()
def close(self):
FilenameViewModuleMixin.close(self)
def get_input_descriptions(self):
return ()
def set_input(self, idx, input_stream):
raise NotImplementedError
def get_output_descriptions(self):
return ('DeVIDE points',)
def get_output(self, idx):
return self._output_points
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
self._config.filename = self._getViewFrameFilename()
def config_to_view(self):
self._setViewFrameFilename(self._config.filename)
def execute_module(self):
if self._config.filename:
fh = file(self._config.filename)
ltext = fh.read()
fh.close()
points_list = eval(ltext)
self._output_points = outputSelectedPoints()
self._output_points.extend(points_list)
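# Illustrative sketch, not part of the original module: execute_module()
# above eval()s the whole file, which will happily execute arbitrary
# expressions; for untrusted .dvp files a literal-only parse is safer
# where available (ast.literal_eval, Python >= 2.6).
def _example_safe_load(filename):
    import ast
    fh = open(filename)
    try:
        return ast.literal_eval(fh.read())
    finally:
        fh.close()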
| {
"repo_name": "chrisidefix/devide",
"path": "modules/readers/points_reader.py",
"copies": "7",
"size": "1795",
"license": "bsd-3-clause",
"hash": -2391106429337194500,
"line_mean": 26.196969697,
"line_max": 81,
"alpha_frac": 0.6139275766,
"autogenerated": false,
"ratio": 4.0610859728506785,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8175013549450678,
"avg_score": null,
"num_lines": null
} |
# $Id: vtpWRT.py 2401 2006-12-20 20:29:15Z cpbotha $
from module_base import ModuleBase
from module_mixins import FilenameViewModuleMixin
import module_utils
import types
class points_writer(FilenameViewModuleMixin, ModuleBase):
def __init__(self, module_manager):
# call parent constructor
ModuleBase.__init__(self, module_manager)
# ctor for this specific mixin
FilenameViewModuleMixin.__init__(
self,
'Select a filename',
'DeVIDE points (*.dvp)|*.dvp|All files (*)|*',
{'Module (self)': self},
fileOpen=False)
# set up some defaults
self._config.filename = ''
self._input_points = None
self.sync_module_logic_with_config()
def close(self):
# we should disconnect all inputs
self.set_input(0, None)
FilenameViewModuleMixin.close(self)
def get_input_descriptions(self):
return ('DeVIDE points',)
def set_input(self, idx, input_stream):
self._input_points = input_stream
def get_output_descriptions(self):
return ()
def get_output(self, idx):
raise Exception
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
self._config.filename = self._getViewFrameFilename()
def config_to_view(self):
self._setViewFrameFilename(self._config.filename)
def execute_module(self):
if self._input_points and hasattr(self._input_points, 'devideType') \
and self._input_points.devideType == 'namedPoints' \
and self._config.filename:
fh = file(self._config.filename, 'w')
fh.write(str(self._input_points))
fh.close()
| {
"repo_name": "fvpolpeta/devide",
"path": "modules/writers/points_writer.py",
"copies": "7",
"size": "1825",
"license": "bsd-3-clause",
"hash": -3016213426091495000,
"line_mean": 27.0769230769,
"line_max": 77,
"alpha_frac": 0.5945205479,
"autogenerated": false,
"ratio": 4.0198237885462555,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014939808202407673,
"num_lines": 65
} |
# $Id: WebInputMixin.py,v 1.10 2006/01/06 21:56:54 tavis_rudd Exp $
"""Provides helpers for Template.webInput(), a method for importing web
transaction variables in bulk. See the docstring of webInput for full details.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.10 $
Start Date: 2002/03/17
Last Revision Date: $Date: 2006/01/06 21:56:54 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.10 $"[11:-2]
from Cheetah.Utils.Misc import useOrRaise
class NonNumericInputError(ValueError): pass
##################################################
## PRIVATE FUNCTIONS AND CLASSES
class _Converter:
"""A container object for info about type converters.
.name, string, name of this converter (for error messages).
.func, function, factory function.
.default, value to use or raise if the real value is missing.
.error, value to use or raise if .func() raises an exception.
"""
def __init__(self, name, func, default, error):
self.name = name
self.func = func
self.default = default
self.error = error
def _lookup(name, func, multi, converters):
"""Look up a Webware field/cookie/value/session value. Return
'(realName, value)' where 'realName' is like 'name' but with any
conversion suffix stripped off. Applies numeric conversion and
single vs multi values according to the comments in the source.
"""
# Step 1 -- split off the conversion suffix from 'name'; e.g. "height:int".
# If there's no colon, the suffix is "". 'longName' is the name with the
# suffix, 'shortName' is without.
# XXX This implementation assumes "height:" means "height".
colon = name.find(':')
if colon != -1:
longName = name
shortName, ext = name[:colon], name[colon+1:]
else:
longName = shortName = name
ext = ''
# Step 2 -- look up the values by calling 'func'.
if longName != shortName:
values = func(longName, None) or func(shortName, None)
else:
values = func(shortName, None)
# 'values' is a list of strings, a string or None.
# Step 3 -- Coerce 'values' to a list of zero, one or more strings.
if values is None:
values = []
elif isinstance(values, str):
values = [values]
# Step 4 -- Find a _Converter object or raise TypeError.
try:
converter = converters[ext]
except KeyError:
fmt = "'%s' is not a valid converter name in '%s'"
tup = (ext, longName)
raise TypeError(fmt % tup)
# Step 5 -- if there's a converter func, run it on each element.
# If the converter raises an exception, use or raise 'converter.error'.
if converter.func is not None:
tmp = values[:]
values = []
for elm in tmp:
try:
elm = converter.func(elm)
except (TypeError, ValueError):
tup = converter.name, elm
errmsg = "%s '%s' contains invalid characters" % tup
elm = useOrRaise(converter.error, errmsg)
values.append(elm)
# 'values' is now a list of strings, ints or floats.
# Step 6 -- If we're supposed to return a multi value, return the list
# as is. If we're supposed to return a single value and the list is
# empty, return or raise 'converter.default'. Otherwise, return the
# first element in the list and ignore any additional values.
if multi:
return shortName, values
if len(values) == 0:
return shortName, useOrRaise(converter.default)
return shortName, values[0]
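# Illustrative sketch, not part of the original module: a minimal
# converters table and a fake field-getter wired into _lookup().
def _lookup_example():
    converters = {
        '': _Converter('string', None, '', ''),
        'int': _Converter('int', int, 0, 0),
        'float': _Converter('float', float, 0.0, 0.0),
    }
    fields = {'height:int': '72'}
    func = lambda name, default=None: fields.get(name, default)
    # The ":int" suffix selects the int converter -> ('height', 72)
    return _lookup('height:int', func, False, converters)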
# vim: sw=4 ts=4 expandtab
| {
"repo_name": "bcui6611/healthchecker",
"path": "Cheetah/Utils/WebInputMixin.py",
"copies": "16",
"size": "3869",
"license": "apache-2.0",
"hash": -24264941728142090,
"line_mean": 36.931372549,
"line_max": 80,
"alpha_frac": 0.6141121737,
"autogenerated": false,
"ratio": 3.9319105691056913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Toplevel Window class."""
from BaseWidget import BaseWidget
from Bin import Bin
from Container import Container
from Constants import *
from StyleInformation import StyleInformation
import base
class Window (Bin):
"""Window (title=None) -> Window
A widget class, that implements a window-like behaviour.
The Window class is a container, which provides a window-like look
and feel, can be moved around the screen and supports an additional
caption bar.
The title to display on the Window caption bar can be set using the
'title' attribute or set_title() method.
window.title = 'Window caption'
window.set_title ('Another title')
It is possible to influence the position of the attached child with
the 'align' attribute and set_align() method. Alignments can be
combined bitwise to place the child at any of the eight possible
positions.
However, not every alignment makes sense: ALIGN_TOP | ALIGN_BOTTOM
would cause the child to be placed at the bottom. The priority
order for the alignment follows. The lower the value, the higher the
priority.
Alignment Priority
-----------------------
ALIGN_BOTTOM 0
ALIGN_TOP 1
ALIGN_LEFT 0
ALIGN_RIGHT 1
ALIGN_NONE 2
Additionally the Window can be moved around the screen by pressing
and holding the left mouse button on its caption bar and moving the
mouse around.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
SIG_MOUSEDOWN - Invoked, when a mouse button is pressed on the
Window.
SIG_MOUSEUP - Invoked, when a mouse button is released on the
Window.
SIG_MOUSEMOVE - Invoked, when the mouse moves over the Window.
Attributes:
title - The caption of the Window.
align - Alignment of the child.
"""
def __init__ (self, title=None):
Bin.__init__ (self)
self.__stopevents = False
self._title = None
self._align = ALIGN_NONE
# Rectangle area for mouse click & movement on the window
# caption.
self._captionrect = None
# State variables for button pressing and mouse movements.
self.__pressed = False
self.__oldpos = None
self._keepactive = False
self._signals[SIG_MOUSEDOWN] = []
self._signals[SIG_MOUSEUP] = []
self._signals[SIG_MOUSEMOVE] = []
self.minsize = (100, 20)
self.set_title (title)
def set_focus (self, focus=True):
"""W.set_focus (...) -> None
Sets the input and action focus of the window.
Sets the input and action focus of the window and returns True
upon success or False, if the focus could not be set.
"""
self.lock ()
if focus:
self.state = STATE_ACTIVE
elif (not self._keepactive) and (self.state == STATE_ACTIVE):
self.state = STATE_NORMAL
Bin.set_focus (self, focus)
self.unlock ()
return True
def set_title (self, text=None):
"""W.set_title (...) -> None
Sets the text to display as the title caption of the Window.
Raises a TypeError, if the passed argument is not a string or
unicode.
"""
if text and (type (text) not in (str, unicode)):
raise TypeError ("text must be a string or unicode")
self._title = text
self.dirty = True
def set_align (self, align):
"""W.set_align (...) -> None
Sets the alignment for the child of the Window.
"""
if not constants_is_align (align):
raise TypeError ("align must be a value from ALIGN_TYPES")
self._align = align
self.dirty = True
def _move_to_position (self, pos):
"""W._move_to_position (...) -> None
Moves the Window to the position relative from itself.
"""
self.topleft = (self.topleft[0] + pos[0] - self.__oldpos[0], \
self.topleft[1] + pos[1] - self.__oldpos[1])
self._captionrect.topleft = self.topleft
self.__oldpos = pos
def notify (self, event):
"""W.notify (event) -> None
Notifies the window about an event.
"""
if not self.sensitive:
return
# Recursively notify all attached children.
if self.child:
self._notify_children (self.child, event)
if self.__stopevents:
return
for control in self.controls:
if event.handled:
return
self._notify_children (control, event)
if event.signal in SIGNALS_MOUSE:
eventarea = self.rect_to_client ()
if event.signal == SIG_MOUSEDOWN:
if eventarea.collidepoint (event.data.pos):
if not event.handled:
self.focus = True
self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
if self._captionrect.collidepoint (event.data.pos):
if event.data.button == 1:
# Initiate window movement.
self.state = STATE_ACTIVE
self.__pressed = True
self.__oldpos = event.data.pos
event.handled = True
else:
self.state = STATE_NORMAL
elif event.signal == SIG_MOUSEUP:
if eventarea.collidepoint (event.data.pos):
self.run_signal_handlers (SIG_MOUSEUP, event.data)
if event.data.button == 1:
if self.__pressed:
self.__pressed = False
event.handled = True
elif event.signal == SIG_MOUSEMOVE:
if self.__pressed:
# The window is moved.
self._move_to_position (event.data.pos)
event.handled = True
elif eventarea.collidepoint (event.data.pos):
self.run_signal_handlers (SIG_MOUSEMOVE, event.data)
event.handled = True
elif event.signal == SIG_FOCUSED:
# Keep the active state of the window, if it contains the child
if self._contains (self, event.data):
if self.state != STATE_ACTIVE:
self.state = STATE_ACTIVE
self._keepactive = True
else:
self._keepactive = False
Bin.notify (self, event)
def _notify_children (self, widget, event):
"""W._notify_children (...) -> None
Notifies all widgets of the Window.
"""
if not self.__stopevents:
if not event.handled:
widget.notify (event)
if not self.__stopevents:
for control in widget.controls:
self._notify_children (control, event)
if isinstance (widget, Bin) and (widget.child != None):
self._notify_children (widget.child, event)
elif isinstance (widget, Container):
for child in widget.children:
self._notify_children (child, event)
def _contains (self, parent, widget):
"""W.contains (...) -> bool
Checks, whether the Window contains the widget.
"""
contains = self._contains
if parent == widget:
return True
for control in parent.controls:
if control == widget:
return True
elif contains (control, widget):
return True
if isinstance (parent, Bin):
if (parent.child != None) and contains (parent.child, widget):
return True
elif isinstance (parent, Container):
for child in parent.children:
if contains (child, widget):
return True
return False
def dispose_widget (self):
"""W.dispose_widget (...) -> int, int
Computes and returns the correct position for the child of the Window.
"""
cls = self.__class__
style = base.GlobalStyle
st = self.style or style.get_style (cls)
border = style.get_border_size (cls, st,
StyleInformation.get ("WINDOW_BORDER"))
dropshadow = style.get_style_entry (cls, st, "shadow")
width = self.image.get_rect ().width
height = self.image.get_rect ().height
posx = (width - dropshadow - self.child.width) / 2
posy = self._captionrect.height + \
(height - self._captionrect.height - self.child.height - \
dropshadow) / 2
if self.align & ALIGN_LEFT:
posx = border + self.padding
elif self.align & ALIGN_RIGHT:
posx = width - self.child.width - border - self.padding - \
dropshadow
if self.align & ALIGN_TOP:
posy = self._captionrect.height + self.padding
elif self.align & ALIGN_BOTTOM:
posy = height - self.child.height - border - self.padding - \
dropshadow
return posx, posy
def draw_bg (self):
"""W.draw_bg () -> Surface
Draws the background surface of the Window and returns it.
"""
return base.GlobalStyle.engine.draw_window (self)
def draw (self):
"""W.draw () -> None
Draws the Window.
"""
Bin.draw (self)
cls = self.__class__
style = base.GlobalStyle
st = self.style or style.get_style (cls)
dropshadow = style.get_style_entry (cls, st, "shadow")
rect = self.image.get_rect ()
# Create the caption.
surface_caption = style.engine.draw_caption (rect.width - dropshadow,
self.title, self.state,
cls, st)
self._captionrect = rect
self._captionrect = surface_caption.get_rect ()
self._captionrect.topleft = self.topleft
# Blit the caption.
self.image.blit (surface_caption, (0, 0))
# Position and blit the child.
if self.child:
posx, posy = self.dispose_widget ()
self.child.topleft = posx, posy
self.image.blit (self.child.image, (posx, posy))
def destroy (self):
"""D.destroy () -> None
Destroys the Window and removes it from its event system.
"""
self.__stopevents = True
Bin.destroy (self)
title = property (lambda self: self._title,
lambda self, var: self.set_title (var),
doc = "The title caption of the Window.")
align = property (lambda self: self._align,
lambda self, var: self.set_align (var),
doc = "The alignment of the child.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Window.py",
"copies": "1",
"size": "12717",
"license": "bsd-2-clause",
"hash": -4046622043938781000,
"line_mean": 35.2307692308,
"line_max": 79,
"alpha_frac": 0.5672721554,
"autogenerated": false,
"ratio": 4.487297106563162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010987867006570394,
"num_lines": 351
} |
""" The PelotonWorker and related classes are defined in this module.
A worker is started by a PSC. Separating
workers from PSC by process ensures that user code running in a service
is unable to derail a PSC or cause such issues that may interfere with
other services. In this way a service may even make use of the event loop
in its worker container, thus becoming asynchronous without risking
over-burdening the PSC event loop, nor of locking it.
The PSC and Generator communicate as follows::
PSC Generator
| --- setHostParams(host, port) -------->|
| --- startWorker(key) ----------------->|
<start worker process>
| |
| Worker
| <--- getRoot() (initialise RPC) -------|
| ---- return Root object (1) ---------->|
| <--- registerWorker(...) --------------|
| ---- return PSC Referenceable(3) ----->|
(1) is the peloton.kernel.PSCRoot; (2) is the peloton.worker.KernelInterface
referenceable and (3) is the peloton.kernel.WorkerInterface referenceable.
The worker is now able to receive instructions from the PSC and is also
able to register with events on the PSC as well as pass other messages back.
"""
from peloton.utils import bigThreadPool
from twisted.internet import reactor
from twisted.internet.threads import deferToThread
try:
from twisted.internet.error import ReactorNotRunning
except ImportError:
# running in Twisted 2.5
ReactorNotRunning = Exception
from twisted.spread import pb
from peloton.base import HandlerBase
from peloton.exceptions import WorkerError
from peloton.exceptions import ServiceConfigurationError
from peloton.exceptions import ServiceError
from peloton.utils import getClassFromString
import peloton.utils.logging as logging
import sys
class PelotonWorker(HandlerBase):
""" A Peloton Worker manages services, executes methods and returns
results to its controling PSC.
"""
def __init__(self, pscHost, pscPort, token):
""" The parent PSC is found at pscHost:pscPort - the host
will in general be the host on which this worker resides but we
allow for other scenarios by passing the host through at this point.
"""
HandlerBase.__init__(self)
self.pscHost = pscHost
self.pscPort = pscPort
self.token = token
self.dispatcher = WorkerEventDispatcher(self)
def start(self):
""" Start this worker; returns an exit code when worker
closes down. """
reactor.callWhenRunning(self._initialise)
reactor.run()
return 0
def _initialise(self):
""" Start the boot-strap process of connecting to the
master PSC, starting the service and announcing ourselves ready to
rock. """
self.kernelInterface = KernelInterface(self)
factory = pb.PBClientFactory()
try:
reactor.connectTCP(self.pscHost, self.pscPort, factory)
d = factory.getRootObject()
d.addCallback(self._clientConnect)
d.addErrback(self._clientConnectError)
except Exception, ex:
raise WorkerError("Could not connect to PSC: %s" % str(ex))
def _clientConnect(self, rootObj):
""" Root object obtained; now offer our interface and the token we
were given to start with to validate our presence. """
self.psc = rootObj
d = self.psc.callRemote("registerWorker", self.kernelInterface, self.token)
d.addCallback(self._pscOK)
d.addErrback(self._clientConnectError)
def _pscOK(self, startupInfo):
""" Now start the service. If OK, message the PSC accordingly;
if not, let the PSC know we've failed and why, then initiate closedown. """
self.name = startupInfo['serviceName']
self.publishedName = startupInfo['publishedName']
self.servicepath = startupInfo['servicePath']
logging.closeHandlers()
logToConsole = False
if startupInfo['logdir'] == None:
logToConsole = True
logging.initLogging(rootLoggerName='WORKER: %s' % self.name,
logLevel=getattr(logging, startupInfo['loglevel']),
logdir=startupInfo['logdir'],
logfile="worker_%s.log" % self.name,
logToConsole=logToConsole)
logging.setAdditionalLoggers(self)
self.logger = logging.getLogger()
# add any service directories to sys.path if not already there
for sd in self.servicepath:
if sd not in sys.path:
sys.path.append(sd)
self.loadService(startupInfo['runtimeConfig'])
try:
self.pscReference = startupInfo['pwa']
except Exception,ex:
self.logger.exception('[1]')
try:
self.startService()
self.dispatcher.fireEvent( 'psc.service.notification',
'domain_control',
serviceName=self.name,
publishedName=self.publishedName,
state='running',
token=self.token)
self.pscReference.callRemote('serviceStartOK', self.__service.version)
except Exception, ex:
self.pscReference.callRemote('serviceStartFailed', str(ex))
self.logger.info("PWP Started for service %s " % self.name)
reactor.callLater(3, self.heartBeat)
def _clientConnectError(self, err):
print("Error connecting with PSC: %s" % err.getErrorMessage())
reactor.stop()
def closedown(self):
self.stopService()
try:
reactor.callLater(0.5,reactor.stop)
# reactor.stop()
except ReactorNotRunning:
pass
def heartBeat(self):
""" Call the heartBeat on the PSC reference to show we're alive."""
try:
d = self.pscReference.callRemote('heartBeat')
d.addErrback(self._heartBeatFailed)
reactor.callLater(3, self.heartBeat)
except pb.DeadReferenceError, ex:
self._heartBeatFailed(ex)
except Exception,ex:
self.logger.exception('[2]')
def _heartBeatFailed(self, err):
self.closedown()
def loadService(self, runtimeConfig = None):
""" Loading a service happens as follows:
- Load service class
- Validate its signature cookie ???
- Load configs: Here the configuration files are read and
internals are organised. This is generally NOT overidden by the
service writer who instead provides the startup() method to do logical
business level initialisation.
Raises ServiceConfigurationError if the name is invalid.
"""
try:
pqcn = "%s.%s.%s" % (self.name.lower(), self.name.lower(), self.name)
cls = getClassFromString(pqcn)
self.__service = cls(self.name, self.dispatcher, logging.getLogger(self.name))
self.__service.initSupportServices()
self.__service.loadConfig(self.servicepath, runtimeConfig)
except Exception, ex:
raise ServiceConfigurationError("Could not find class for service %s" % self.name, ex)
def startService(self):
""" Call serviceClass.start(): this is the method which sets up
loggers, starts connection pools and does any other initialisation
the service might require.
"""
try:
self.__service.start()
except Exception, ex:
raise ServiceConfigurationError("Error starting service %s" % self.name, ex)
def stopService(self):
""" Calls stop() on the managed service. """
try:
self.dispatcher.fireEvent( 'psc.service.notification',
'domain_control',
serviceName=self.name,
publishedName=self.publishedName,
state='stopped',
token=self.token)
self.__service.stop()
except Exception, ex:
raise ServiceError("Error stopping service %s" % self.name, ex)
def call(self, method, *args, **kwargs):
""" Call and excecute the specified method with args as provided. """
mthd = getattr(self.__service, "public_%s"%method)
return deferToThread(mthd, *args, **kwargs)
class KernelInterface(pb.Referenceable):
""" This class mediates between the worker and the kernel; it
is the means by which the kernel makes method requests etc."""
def __init__(self, pw):
""" pw is the parent PelotonWorker class. """
self.worker = pw
def remote_getState(self):
""" Return a dictionary of state information. """
pass
def remote_stop(self):
""" Stop this worker"""
self.worker.closedown()
def remote_call(self, method, *args, **kwargs):
""" Return the result of calling method(*args, **kwargs)
on this service. """
return self.worker.call(method, *args, **kwargs)
class WorkerEventDispatcher(object):
def __init__(self, worker):
self.worker = worker
def register(self, key, handler, exchange='events'):
try:
self.worker.pscReference.callRemote('register',
key, handler, exchange)
except Exception,ex:
print(ex)
def deregister(self, handler):
try:
self.worker.pscReference.callRemote('deregister', handler)
except Exception,ex:
print(ex)
def fireEvent(self, key, exchange='events', **kwargs):
try:
self.worker.pscReference.callRemote('fireEvent',
key, exchange, **kwargs)
except Exception,ex:
print("Error firing worker event (%s) %s: %s" % (str(ex), key, str(kwargs)))
def getRegisteredExchanges(self):
"""Not relevent in this dispatcher. """
raise NotImplementedError()
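# Illustrative sketch, not part of the original module: how a PSC
# generator hands control to a worker process once the host, port and
# one-shot token from the handshake in the module docstring are known.
def _example_bootstrap(pscHost, pscPort, token):
    worker = PelotonWorker(pscHost, pscPort, token)
    return worker.start()   # runs the reactor; returns an exit code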
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/worker.py",
"copies": "1",
"size": "10346",
"license": "bsd-3-clause",
"hash": -1462811722712096000,
"line_mean": 38.3384030418,
"line_max": 98,
"alpha_frac": 0.6112507249,
"autogenerated": false,
"ratio": 4.321637426900585,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5432888151800586,
"avg_score": null,
"num_lines": null
} |
# $Id: /work/modules/celementtree/selftest.py 1128 2005-12-16T21:57:13.668520Z Fredrik $
# elementtree selftest program
# this test script uses Python's "doctest" module to check that the
# *test script* works as expected.
import sys, StringIO
import cElementTree as ElementTree
def unserialize(text):
import StringIO
file = StringIO.StringIO(text)
tree = ElementTree.parse(file)
return tree.getroot()
def serialize(elem, encoding=None):
import StringIO
file = StringIO.StringIO()
tree = ElementTree.ElementTree(elem)
if encoding:
tree.write(file, encoding)
else:
tree.write(file)
return file.getvalue()
def summarize(elem):
return elem.tag
def summarize_list(seq):
return map(summarize, seq)
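def helpers():
    """
    Sanity checks for the local helper functions above (an
    illustrative addition, not part of the original suite).
    >>> serialize(unserialize("<tag>hello</tag>"))
    '<tag>hello</tag>'
    >>> summarize_list(unserialize("<a><b /><c /></a>"))
    ['b', 'c']
    """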
SAMPLE_XML = unserialize("""
<body>
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
""")
SAMPLE_XML_NS = unserialize("""
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
""")
# interface tests
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print "expected one-character string, got %r" % char
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print "expected value string, got %r" % mapping["key"]
def check_element(element):
if not ElementTree.iselement(element):
print "not an element"
if not hasattr(element, "tag"):
print "no tag member"
if not hasattr(element, "attrib"):
print "no attrib member"
if not hasattr(element, "text"):
print "no text member"
if not hasattr(element, "tail"):
print "no tail member"
check_string(element.tag)
check_mapping(element.attrib)
if element.text != None:
check_string(element.text)
if element.tail != None:
check_string(element.tail)
def check_element_tree(tree):
check_element(tree.getroot())
def check_method(method):
if not callable(method):
print method, "not callable"
def version():
"""
>>> ElementTree.__version__
'1.0.5'
>>> ElementTree.VERSION
'1.0.5'
"""
def element():
"""
Test element tree interface.
>>> element = ElementTree.Element("tag")
>>> check_element(element)
>>> tree = ElementTree.ElementTree(element)
>>> check_element_tree(tree)
Make sure all standard element methods exist.
>>> check_method(element.append)
>>> check_method(element.insert)
>>> check_method(element.remove)
>>> check_method(element.getchildren)
>>> check_method(element.find)
>>> check_method(element.findall)
>>> check_method(element.findtext)
>>> check_method(element.clear)
>>> check_method(element.get)
>>> check_method(element.set)
>>> check_method(element.keys)
>>> check_method(element.items)
>>> check_method(element.getiterator)
Basic method sanity checks.
>>> serialize(element) # 1
'<tag key="value" />'
>>> subelement = ElementTree.Element("subtag")
>>> element.append(subelement)
>>> serialize(element) # 2
'<tag key="value"><subtag /></tag>'
>>> element.insert(0, subelement)
>>> serialize(element) # 3
'<tag key="value"><subtag /><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 4
'<tag key="value"><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 5
'<tag key="value" />'
>>> element.remove(subelement)
Traceback (most recent call last):
ValueError: list.remove(x): x not in list
>>> serialize(element) # 6
'<tag key="value" />'
"""
def parsefile():
"""
Test parsing from file. Note that we're opening the files in
here; by default, the 'parse' function opens the file in binary
mode, and doctest doesn't filter out carriage returns.
>>> tree = ElementTree.parse(open("samples/simple.xml", "r"))
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> tree = ElementTree.parse(open("samples/simple-ns.xml", "r"))
>>> tree.write(sys.stdout)
<ns0:root xmlns:ns0="namespace">
<ns0:element key="value">text</ns0:element>
<ns0:element>text</ns0:element>tail
<ns0:empty-element />
</ns0:root>
>>> parser = ElementTree.XMLParser()
>>> parser.version
'Expat 1.95.8'
>>> parser.feed(open("samples/simple.xml").read())
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> parser = ElementTree.XMLTreeBuilder() # 1.2 compatibility
>>> parser.feed(open("samples/simple.xml").read())
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> target = ElementTree.TreeBuilder()
>>> parser = ElementTree.XMLParser(target=target)
>>> parser.feed(open("samples/simple.xml").read())
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
def writefile():
"""
>>> elem = ElementTree.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ElementTree.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
>>> elem.insert(0, ElementTree.Comment("comment"))
>>> serialize(elem)
'<tag>text<!-- comment --><subtag>subtext</subtag></tag>'
>>> elem[0] = ElementTree.PI("key", "value")
>>> serialize(elem)
'<tag>text<?key value?><subtag>subtext</subtag></tag>'
"""
def encoding():
r"""
Test encoding issues.
>>> elem = ElementTree.Element("tag")
>>> elem.text = u"abc"
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, "utf-8")
'<tag>abc</tag>'
>>> serialize(elem, "us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, "iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
'<tag><&"\'></tag>'
>>> serialize(elem, "utf-8")
'<tag><&"\'></tag>'
>>> serialize(elem, "us-ascii") # cdata characters
'<tag><&"\'></tag>'
>>> serialize(elem, "iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag><&"\'></tag>'
>>> elem.attrib["key"] = "<&\"\'>"
>>> elem.text = None
>>> serialize(elem)
'<tag key="<&"'>" />'
>>> serialize(elem, "utf-8")
'<tag key="<&"'>" />'
>>> serialize(elem, "us-ascii")
'<tag key="<&"'>" />'
>>> serialize(elem, "iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="<&"'>" />'
>>> elem.text = u'\xe5\xf6\xf6<>'
>>> elem.attrib.clear()
>>> serialize(elem)
'<tag>åöö<></tag>'
>>> serialize(elem, "utf-8")
'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>'
>>> serialize(elem, "us-ascii")
'<tag>åöö<></tag>'
>>> serialize(elem, "iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6<></tag>"
>>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
>>> elem.text = None
>>> serialize(elem)
'<tag key="åöö<>" />'
>>> serialize(elem, "utf-8")
'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />'
>>> serialize(elem, "us-ascii")
'<tag key="åöö<>" />'
>>> serialize(elem, "iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6<>" />'
"""
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ElementTree.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri" />'
2) decorated attributes
>>> elem.attrib["{uri}key"] = "value"
>>> serialize(elem) # 2.1
'<ns0:tag ns0:key="value" xmlns:ns0="uri" />'
"""
def cdata():
"""
Test CDATA handling (etc).
>>> serialize(unserialize("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(unserialize("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(unserialize("<tag><![CDATA[hello]]></tag>"))
'<tag>hello</tag>'
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = SAMPLE_XML
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("section/tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ElementTree.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag']
>>> summarize_list(elem.findall("section/*"))
['tag']
>>> summarize_list(elem.findall("section//*"))
['tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag']
>>> summarize_list(elem.findall("*/*"))
['tag']
>>> summarize_list(elem.findall("*//*"))
['tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("./tag"))
['tag', 'tag']
>>> elem = SAMPLE_XML_NS
>>> summarize_list(elem.findall("tag"))
[]
>>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
>>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
"""
def copy():
"""
Test copy handling (etc).
>>> import copy
>>> e1 = unserialize("<tag>hello<foo/></tag>")
>>> e2 = copy.copy(e1)
>>> e3 = copy.deepcopy(e1)
>>> e1.find("foo").tag = "bar"
>>> serialize(e1)
'<tag>hello<bar /></tag>'
>>> serialize(e2)
'<tag>hello<bar /></tag>'
>>> serialize(e3)
'<tag>hello<foo /></tag>'
"""
def attrib():
"""
Test attribute handling.
>>> elem = ElementTree.Element("tag")
>>> elem.get("key") # 1.1
>>> elem.get("key", "default") # 1.2
'default'
>>> elem.set("key", "value")
>>> elem.get("key") # 1.3
'value'
>>> elem = ElementTree.Element("tag", key="value")
>>> elem.get("key") # 2.1
'value'
>>> elem.attrib # 2.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ElementTree.Element("tag", attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 3.1
'value'
>>> elem.attrib # 3.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ElementTree.Element("tag", **attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 4.1
'value'
>>> elem.attrib # 4.2
{'key': 'value'}
>>> elem = ElementTree.Element("tag", {"key": "other"}, key="value")
>>> elem.get("key") # 5.1
'value'
>>> elem.attrib # 5.2
{'key': 'value'}
"""
def makeelement():
"""
Test makeelement handling.
>>> elem = ElementTree.Element("tag")
>>> attrib = {"key": "value"}
>>> subelem = elem.makeelement("subtag", attrib)
>>> if subelem.attrib is attrib:
... print "attrib aliasing"
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.clear()
>>> serialize(elem)
'<tag />'
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
"""
def iterparse():
"""
Test iterparse interface.
>>> context = ElementTree.iterparse("samples/simple.xml")
>>> for action, elem in context:
... print action, elem.tag
end element
end element
end empty-element
end root
>>> context.root.tag
'root'
>>> context = ElementTree.iterparse("samples/simple-ns.xml")
>>> for action, elem in context:
... print action, elem.tag
end {namespace}element
end {namespace}element
end {namespace}empty-element
end {namespace}root
>>> events = ()
>>> context = ElementTree.iterparse("samples/simple.xml", events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ()
>>> context = ElementTree.iterparse("samples/simple.xml", events=events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ("start", "end")
>>> context = ElementTree.iterparse("samples/simple.xml", events)
>>> for action, elem in context:
... print action, elem.tag
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> events = ("start", "end")
>>> context = ElementTree.iterparse("samples/simple-ns.xml", events)
>>> for action, elem in context:
... if action in ("start", "end"):
... print action, elem.tag
... else:
... print action, elem
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
>>> events = ("start", "end", "bogus")
>>> context = ElementTree.iterparse("samples/simple.xml", events)
>>> if sys.version[:3] > "2.1": # don't apply this test for 2.1
... for action, elem in context:
... if action in ("start", "end"):
... print action, elem.tag
... else:
... print action, elem
... else:
... raise ValueError("unknown event 'bogus'")
Traceback (most recent call last):
ValueError: unknown event 'bogus'
"""
def custom_builder():
"""
Test parser w. custom builder.
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
>>> builder = Builder()
>>> parser = ElementTree.XMLParser(builder)
>>> parser.feed(open("samples/simple.xml", "r").read())
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
... def pi(self, target, data):
... print "pi", target, repr(data)
... def comment(self, data):
... print "comment", repr(data)
>>> builder = Builder()
>>> parser = ElementTree.XMLParser(builder)
>>> parser.feed(open("samples/simple-ns.xml", "r").read())
pi pi 'data'
comment ' comment '
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
"""
def getchildren():
"""
>>> tree = ElementTree.parse(open("samples/simple.xml", "r"))
>>> for elem in tree.getiterator():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
"""
Test entity handling.
1) bad entities
>>> ElementTree.XML("<document>&entity;</document>")
Traceback (most recent call last):
SyntaxError: undefined entity: line 1, column 10
2) custom entity
>>> parser = ElementTree.XMLParser()
>>> parser.entity["entity"] = "text"
>>> parser.feed(ENTITY_XML)
>>> root = parser.close()
>>> serialize(root)
'<document>text</document>'
"""
#
# reported bugs
class ExceptionFile:
def read(self, x):
raise IOError
def xmltoolkit60():
"""
Handle crash in stream source.
>>> tree = ElementTree.parse(ExceptionFile())
Traceback (most recent call last):
IOError
"""
def xmltoolkit61(encoding):
"""
Handle non-standard encodings.
>>> xmltoolkit61("ascii")
>>> xmltoolkit61("us-ascii")
>>> xmltoolkit61("iso-8859-1")
>>> xmltoolkit61("iso-8859-15")
>>> xmltoolkit61("cp437")
>>> xmltoolkit61("mac-roman")
"""
ElementTree.XML(
"<?xml version='1.0' encoding='%s'?><xml />" % encoding
)
if __name__ == "__main__":
import doctest
failed, tested = doctest.testmod(__import__(__name__))
print tested - failed, "tests ok."
| {
"repo_name": "prats226/python-amazon-product-api-0.2.8",
"path": "tests/build/cElementTree/selftest.py",
"copies": "1",
"size": "18060",
"license": "bsd-3-clause",
"hash": -4323028278743729000,
"line_mean": 26.1987951807,
"line_max": 94,
"alpha_frac": 0.5677740864,
"autogenerated": false,
"ratio": 3.4851408722500965,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45529149586500967,
"avg_score": null,
"num_lines": null
} |
"""
Auxiliary transforms mainly to be used by Writer components.
This module is called "writer_aux" because otherwise there would be
conflicting imports like this one::
from docutils import writers
from docutils.transforms import writers
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = 1
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = 0
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
Transform specific admonitions, like this:
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
lcode = self.document.settings.language_code
language = languages.get_language(lcode)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
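# A minimal usage sketch (illustrative, not part of the original module):
# these transforms are normally scheduled by a Writer, but assuming
# "document" is a parsed docutils document they could be applied by hand:
#
#   document.transformer.add_transform(Compound)
#   document.transformer.add_transform(Admonitions)
#   document.transformer.apply_transforms()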
| {
"repo_name": "kdwink/intellij-community",
"path": "python/helpers/py2only/docutils/transforms/writer_aux.py",
"copies": "5",
"size": "2553",
"license": "apache-2.0",
"hash": -9057898937839919000,
"line_mean": 28.0113636364,
"line_max": 77,
"alpha_frac": 0.5871523698,
"autogenerated": false,
"ratio": 4.567084078711986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7654236448511986,
"avg_score": null,
"num_lines": null
} |
"""
Auxiliary transforms mainly to be used by Writer components.
This module is called "writer_aux" because otherwise there would be
conflicting imports like this one::
from docutils import writers
from docutils.transforms import writers
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = 1
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = 0
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
Transform specific admonitions, like this:
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
lcode = self.document.settings.language_code
language = languages.get_language(lcode)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/transforms/writer_aux.py",
"copies": "2",
"size": "2648",
"license": "bsd-3-clause",
"hash": -5466556861300985000,
"line_mean": 28.0909090909,
"line_max": 77,
"alpha_frac": 0.5679758308,
"autogenerated": false,
"ratio": 4.661971830985915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6229947661785915,
"avg_score": null,
"num_lines": null
} |
"""
Auxiliary transforms mainly to be used by Writer components.
This module is called "writer_aux" because otherwise there would be
conflicting imports like this one::
from docutils import writers
from docutils.transforms import writers
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = 1
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = 0
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
Transform specific admonitions, like this:
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
| {
"repo_name": "chirilo/remo",
"path": "vendor-local/lib/python/docutils/transforms/writer_aux.py",
"copies": "6",
"size": "2602",
"license": "bsd-3-clause",
"hash": 7546197465620724000,
"line_mean": 28.5681818182,
"line_max": 79,
"alpha_frac": 0.5810914681,
"autogenerated": false,
"ratio": 4.638146167557932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8219237635657931,
"avg_score": null,
"num_lines": null
} |
"""
Auxiliary transforms mainly to be used by Writer components.
This module is called "writer_aux" because otherwise there would be
conflicting imports like this one::
from docutils import writers
from docutils.transforms import writers
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = 1
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = 0
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
Transform specific admonitions, like this:
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
| {
"repo_name": "ajaxsys/dict-admin",
"path": "docutils/transforms/writer_aux.py",
"copies": "2",
"size": "2690",
"license": "bsd-3-clause",
"hash": -1782652355335115800,
"line_mean": 28.5681818182,
"line_max": 79,
"alpha_frac": 0.5620817844,
"autogenerated": false,
"ratio": 4.727592267135325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6289674051535326,
"avg_score": null,
"num_lines": null
} |
"""
Auxiliary transforms mainly to be used by Writer components.
This module is called "writer_aux" because otherwise there would be
conflicting imports like this one::
from docutils import writers
from docutils.transforms import writers
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = True
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = False
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
Transform specific admonitions, like this:
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
| {
"repo_name": "suncycheng/intellij-community",
"path": "python/helpers/py3only/docutils/transforms/writer_aux.py",
"copies": "44",
"size": "2602",
"license": "apache-2.0",
"hash": -1307849995317812500,
"line_mean": 28.5681818182,
"line_max": 79,
"alpha_frac": 0.5818601076,
"autogenerated": false,
"ratio": 4.65474060822898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 88
} |
import datetime
import xmlrpclib
from twisted.application import service
from twisted.web import xmlrpc
from peloton.coreio import PelotonRequestInterface
def dump_xmlrpc_date(self, value, write):
# according to spec at http://www.xmlrpc.com/spec XML-RPC date
# is ISO 8601 format e.g. 19980717T14:08:55
date = value.strftime("%Y%m%dT%H:%M:%S")  # works for datetime and date instances alike
write("<value><dateTime.iso8601>%s</dateTime.iso8601></value>" % date)
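# Illustrative output (assuming value is datetime.datetime(1998, 7, 17, 14, 8, 55)):
#   <value><dateTime.iso8601>19980717T14:08:55</dateTime.iso8601></value>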
def dump_xmlrpc_time(self, value, write):
""" Serialize a datetime.time instance iso8601 format"""
write("<value><dateTime.iso8601>%s</dateTime.iso8601></value>" % value.strftime("%Y%m%dT%H:%M:%S"))
def dump_xmlrpc_none(self, value, write):
""" Handle None types by sending a string? """
write("<value><nil/></value>")
class PelotonXMLRPCHandler(service.Service):
""" XMLRPC protocol interface"""
def __init__(self, kernel):
self.kernel = kernel
self.logger = kernel.logger
self.requestInterface = PelotonRequestInterface(kernel)
# Need to add a marshaler for datetime instances as xmlrpclib
# will only marshall it's own style of datetime.
xmlrpclib.Marshaller.dispatch[type(datetime.datetime.now())] = dump_xmlrpc_date
xmlrpclib.Marshaller.dispatch[type(datetime.date(2007,1,1))] = dump_xmlrpc_date
xmlrpclib.Marshaller.dispatch[type(datetime.time(0,0))] = dump_xmlrpc_time
xmlrpclib.Marshaller.dispatch[type(None)] = dump_xmlrpc_none
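# Marshaller.dispatch is a class-level table, so these handlers take effect
# process-wide; e.g. (illustrative) xmlrpclib.dumps((None,)) now emits a
# <nil/> value instead of raising a TypeError.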
self.setResource()
def xmlrpc_request(self, sessionId, *args, **kargs):
""" Make a service request. Takes the following arguments:
- service (string)
- method (string)
- <parameters>
"""
service = args[0]
method = args[1]
params = args[2:]
return self.requestInterface.public_call(sessionId, "raw", service, method, params, {})
def setResource(self):
""" Create the XMLRPC resource and attach all xmlrpc_* methods
to it. """
x = xmlrpc.XMLRPC()
publishedMethods = [i for i in dir(self) if i.startswith('xmlrpc_')]
for mthd in publishedMethods:
# equiv to x.<mthd> = self.<mthd>
setattr(x, mthd, getattr(self, mthd))
self._resource = x
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/adapters/xmlrpc.py",
"copies": "1",
"size": "2451",
"license": "bsd-3-clause",
"hash": -103273594877179410,
"line_mean": 37.9047619048,
"line_max": 103,
"alpha_frac": 0.6597307222,
"autogenerated": false,
"ratio": 3.631111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4790841833311111,
"avg_score": null,
"num_lines": null
} |
# $('img[src^="https://z2.muscache.com/ac"]').each(function(x, y){console.log(x, y.src);})
from urlparse import urlsplit
from os.path import basename
import urllib2
import re
import os
import json
with open('./input.json') as json_data:
houseNumber = json.load(json_data)['houseNumber']
print houseNumber
dir_name = 'houses/' + houseNumber
if not os.path.exists(dir_name):
os.mkdir(dir_name)
offset = 0
f = open('./houses/source.txt')
url_content = f.read()
answers = re.findall('.+?\.muscache\.com\/.+?\/pictures\/([^\.]+)\.jpg', url_content)
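# Illustrative match (hypothetical URL): a source line containing
#   https://z2.muscache.com/ac/pictures/abc123.jpg
# yields the picture id 'abc123', which is rebuilt into a download URL below.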
img_count = 0
print len(answers)
while offset < len(answers):
post_url = "https://z2.muscache.com/ac/pictures/" + answers[offset] + ".jpg"
print post_url
try:
img_data = urllib2.urlopen(post_url).read()
file_name = basename(urlsplit(post_url)[2])
output = open(dir_name + '/' + file_name, 'wb')
output.write(img_data)
output.close()
print "Saved {} images".format(img_count)
img_count += 1
except KeyboardInterrupt:
print "Exit on user prompt"
exit(1)
except:
print post_url + ' downloading failed!'
pass
finally:
offset += 1
print "Page down - offset {}".format(offset)
| {
"repo_name": "Jeff-Tian/mybnb",
"path": "batch_download_pic.py",
"copies": "1",
"size": "1274",
"license": "apache-2.0",
"hash": 5873959632851549000,
"line_mean": 27.3111111111,
"line_max": 90,
"alpha_frac": 0.6130298273,
"autogenerated": false,
"ratio": 3.300518134715026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4413547962015026,
"avg_score": null,
"num_lines": null
} |
# $ ipython --gui=wx
# In [1]: %run visualization/plot-2.py
# In [2]: plot("z_dual_1.norms")
from mayavi import mlab
import numpy
import re
def mycolor(x):
"""Returns a color vector (a triple of floats) based on x, where x is
in the range [0, 1].
"""
lut = [
[ 0, 0, 0, 255],
[ 1, 0, 0, 255],
[ 2, 0, 0, 255],
[ 4, 0, 0, 255],
[ 5, 0, 0, 255],
[ 6, 0, 0, 255],
[ 8, 0, 0, 255],
[ 9, 0, 0, 255],
[ 10, 0, 0, 255],
[ 12, 0, 0, 255],
[ 14, 0, 0, 255],
[ 16, 0, 0, 255],
[ 17, 0, 0, 255],
[ 18, 0, 0, 255],
[ 20, 0, 0, 255],
[ 21, 0, 0, 255],
[ 23, 0, 0, 255],
[ 24, 0, 0, 255],
[ 26, 0, 0, 255],
[ 27, 0, 0, 255],
[ 28, 0, 0, 255],
[ 29, 0, 0, 255],
[ 31, 0, 0, 255],
[ 32, 0, 0, 255],
[ 33, 0, 0, 255],
[ 35, 0, 0, 255],
[ 36, 0, 0, 255],
[ 37, 0, 0, 255],
[ 39, 0, 0, 255],
[ 40, 0, 0, 255],
[ 42, 0, 0, 255],
[ 43, 0, 0, 255],
[ 46, 0, 0, 255],
[ 47, 0, 0, 255],
[ 48, 0, 0, 255],
[ 50, 0, 0, 255],
[ 51, 0, 0, 255],
[ 53, 0, 0, 255],
[ 54, 0, 0, 255],
[ 55, 0, 0, 255],
[ 56, 0, 0, 255],
[ 58, 0, 0, 255],
[ 59, 0, 0, 255],
[ 60, 0, 0, 255],
[ 62, 0, 0, 255],
[ 63, 0, 0, 255],
[ 65, 0, 0, 255],
[ 66, 0, 0, 255],
[ 68, 0, 0, 255],
[ 69, 0, 0, 255],
[ 70, 0, 0, 255],
[ 71, 0, 0, 255],
[ 73, 0, 0, 255],
[ 74, 0, 0, 255],
[ 77, 0, 0, 255],
[ 78, 0, 0, 255],
[ 80, 0, 0, 255],
[ 81, 0, 0, 255],
[ 82, 0, 0, 255],
[ 84, 0, 0, 255],
[ 85, 0, 0, 255],
[ 86, 0, 0, 255],
[ 88, 0, 0, 255],
[ 89, 0, 0, 255],
[ 91, 0, 0, 255],
[ 93, 0, 0, 255],
[ 95, 0, 0, 255],
[ 96, 0, 0, 255],
[ 97, 0, 0, 255],
[ 98, 0, 0, 255],
[100, 0, 0, 255],
[101, 0, 0, 255],
[102, 0, 0, 255],
[104, 0, 0, 255],
[105, 0, 0, 255],
[108, 0, 0, 255],
[110, 0, 0, 255],
[111, 0, 0, 255],
[113, 0, 0, 255],
[114, 0, 0, 255],
[115, 0, 0, 255],
[116, 0, 0, 255],
[118, 0, 0, 255],
[119, 0, 0, 255],
[120, 0, 0, 255],
[122, 0, 0, 255],
[123, 0, 0, 255],
[124, 0, 0, 255],
[126, 0, 0, 255],
[127, 0, 0, 255],
[128, 0, 0, 255],
[130, 0, 0, 255],
[131, 0, 0, 255],
[133, 0, 0, 255],
[134, 0, 0, 255],
[135, 0, 0, 255],
[138, 0, 0, 255],
[140, 0, 0, 255],
[140, 0, 0, 255],
[142, 0, 0, 255],
[143, 0, 0, 255],
[145, 0, 0, 255],
[146, 0, 0, 255],
[147, 0, 0, 255],
[149, 0, 0, 255],
[150, 0, 0, 255],
[152, 0, 0, 255],
[153, 0, 0, 255],
[155, 0, 0, 255],
[156, 0, 0, 255],
[157, 0, 0, 255],
[158, 0, 0, 255],
[160, 0, 0, 255],
[161, 0, 0, 255],
[162, 0, 0, 255],
[164, 0, 0, 255],
[165, 0, 0, 255],
[167, 0, 0, 255],
[169, 0, 0, 255],
[170, 0, 0, 255],
[172, 0, 0, 255],
[173, 0, 0, 255],
[175, 1, 0, 255],
[176, 3, 0, 255],
[177, 4, 0, 255],
[179, 6, 0, 255],
[180, 8, 0, 255],
[182, 10, 0, 255],
[183, 13, 0, 255],
[185, 16, 0, 255],
[187, 17, 0, 255],
[188, 19, 0, 255],
[189, 20, 0, 255],
[191, 22, 0, 255],
[192, 24, 0, 255],
[194, 26, 0, 255],
[195, 28, 0, 255],
[197, 30, 0, 255],
[198, 32, 0, 255],
[200, 34, 0, 255],
[202, 36, 0, 255],
[203, 38, 0, 255],
[205, 40, 0, 255],
[206, 42, 0, 255],
[207, 44, 0, 255],
[209, 46, 0, 255],
[210, 48, 0, 255],
[211, 49, 0, 255],
[212, 51, 0, 255],
[214, 52, 0, 255],
[215, 54, 0, 255],
[217, 56, 0, 255],
[218, 58, 0, 255],
[220, 60, 0, 255],
[221, 61, 0, 255],
[222, 63, 0, 255],
[224, 65, 0, 255],
[225, 67, 0, 255],
[226, 68, 0, 255],
[227, 70, 0, 255],
[229, 72, 0, 255],
[232, 76, 0, 255],
[233, 77, 0, 255],
[234, 79, 0, 255],
[236, 81, 0, 255],
[237, 83, 0, 255],
[239, 85, 0, 255],
[240, 86, 0, 255],
[241, 88, 0, 255],
[242, 89, 0, 255],
[244, 91, 0, 255],
[245, 93, 0, 255],
[247, 95, 0, 255],
[248, 97, 0, 255],
[249, 99, 0, 255],
[251, 101, 0, 255],
[252, 102, 0, 255],
[253, 103, 0, 255],
[255, 105, 0, 255],
[255, 107, 0, 255],
[255, 109, 0, 255],
[255, 111, 0, 255],
[255, 114, 0, 255],
[255, 117, 0, 255],
[255, 118, 0, 255],
[255, 120, 0, 255],
[255, 121, 0, 255],
[255, 123, 0, 255],
[255, 125, 0, 255],
[255, 127, 0, 255],
[255, 129, 0, 255],
[255, 131, 0, 255],
[255, 133, 1, 255],
[255, 136, 8, 255],
[255, 137, 11, 255],
[255, 139, 15, 255],
[255, 141, 19, 255],
[255, 143, 22, 255],
[255, 145, 26, 255],
[255, 146, 30, 255],
[255, 148, 34, 255],
[255, 150, 37, 255],
[255, 152, 41, 255],
[255, 154, 47, 255],
[255, 157, 52, 255],
[255, 159, 55, 255],
[255, 161, 59, 255],
[255, 162, 63, 255],
[255, 164, 67, 255],
[255, 166, 70, 255],
[255, 168, 74, 255],
[255, 170, 78, 255],
[255, 171, 81, 255],
[255, 173, 85, 255],
[255, 174, 89, 255],
[255, 176, 93, 255],
[255, 178, 96, 255],
[255, 180, 100, 255],
[255, 182, 103, 255],
[255, 184, 107, 255],
[255, 186, 110, 255],
[255, 187, 114, 255],
[255, 188, 118, 255],
[255, 190, 122, 255],
[255, 192, 126, 255],
[255, 196, 133, 255],
[255, 198, 137, 255],
[255, 200, 140, 255],
[255, 202, 144, 255],
[255, 203, 148, 255],
[255, 205, 152, 255],
[255, 206, 155, 255],
[255, 208, 158, 255],
[255, 210, 162, 255],
[255, 212, 166, 255],
[255, 214, 169, 255],
[255, 216, 173, 255],
[255, 217, 177, 255],
[255, 219, 181, 255],
[255, 221, 184, 255],
[255, 222, 188, 255],
[255, 224, 192, 255],
[255, 226, 195, 255],
[255, 228, 199, 255],
[255, 229, 203, 255],
[255, 231, 206, 255],
[255, 234, 212, 255],
[255, 237, 217, 255],
[255, 238, 221, 255],
[255, 240, 225, 255],
[255, 242, 228, 255],
[255, 244, 232, 255],
[255, 245, 236, 255],
[255, 247, 240, 255],
[255, 249, 243, 255],
[255, 251, 247, 255]
]
if(x < 0 or x > 1):
raise Exception("illegal scale")
i = int(x*255)
return (lut[i][0]/255., lut[i][1]/255., lut[i][2]/255.)
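# Examples from the LUT above: mycolor(0.0) returns (0.0, 0.0, 0.0) (black)
# and mycolor(1.0) returns (255/255., 251/255., 247/255.), i.e. near-white.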
def stratify(number_bins, norms, *args):
"""Stratify the lists by norms into number_bins strata.
The args contain the centers and the widths, i.e. args <- center_i,
center_j, width_i, width_j. The function returns a list whose first
element is the stratified norms, followed by each arg stratified the same way.
"""
length = len(norms)
for i in range(len(args)):
if len(args[i]) != length:
print(norms)
print(i)
print(args[i])
raise Exception("All lengths have to match")
min_norm = numpy.amin(norms)
max_norm = numpy.amax(norms)
def bound(i):
return min_norm+i*(max_norm-min_norm)/float(number_bins)
args_stratified = [ [ [] for j in range(number_bins) ] for i in range(len(args)) ]
norms_stratified = [ [] for i in range(number_bins) ]
print("stratifying into {:d} bins".format(number_bins))
for i in range(len(norms)):
found_bin = False
for j in range(number_bins):
if norms[i] >= bound(j) and norms[i] < bound(j+1):
for k in range(len(args)):
args_stratified[k][j].append(args[k][i])
norms_stratified[j].append(norms[i])
found_bin = True
break
if not found_bin:
for k in range(len(args)):
args_stratified[k][number_bins-1].append(args[k][i])
norms_stratified[number_bins-1].append(norms[i])
# for i in range(number_bins):
# print("{:d} norm [{:1.2f},{:1.2f})".format(
# len(args_stratified[0][i]), bound(i), bound(i+1)))
result = [ norms_stratified ]
for arg in args_stratified:
result.append(arg)
return result
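# Worked example (tiny, hypothetical input): stratify(2, [0.1, 0.9], [10, 20])
# puts norm 0.1 into the bin [0.1, 0.5); norm 0.9 equals the upper bound, so
# it lands in the last bin via the fallback branch. The call returns
# [[[0.1], [0.9]], [[10], [20]]].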
def read_squares(fd, start, end=None):
"""Reads a norms file from a call to spamm_tree_print_leaves_2d_symm().
"""
# Use readline() with a length argument so we can tell whether the
# file as reached EOF.
LINE_LENGTH = 1000
i = []
j = []
width_i = []
width_j = []
norm = []
re_matrix_square = re.compile("^\s*([0-9.eEdD+-]+)"
+ "\s+([0-9.eEdD+-]+)"
+ "\s+([0-9]+)"
+ "\s+([0-9]+)"
+ "\s+([0-9.eEdD+-]+)$")
while True:
line = fd.readline(LINE_LENGTH)
if len(line) == 0:
return None
if start.search(line):
matrix_name = line.rstrip()
break
line = fd.readline()
block_size = int(line)
while True:
old_position = fd.tell()
line = fd.readline(LINE_LENGTH)
if len(line) == 0:
break
if end != None:
if end.search(line):
fd.seek(old_position)
break
result = re_matrix_square.search(line)
i.append(float(result.group(1)))
j.append(float(result.group(2)))
width_i.append(int(result.group(3)))
width_j.append(int(result.group(4)))
norm.append(float(result.group(5)))
print("loaded {:d} matrix squares from {:s}".format(len(i), matrix_name))
result = (block_size, i, j, width_i, width_j, norm)
#print(result)
return result
def read_cubes(fd, start, end=None):
"""Reads a norms file from a call to spamm_tree_print_leaves_2d_symm().
"""
# Use readline() with a length argument so we can tell whether the
# file as reached EOF.
LINE_LENGTH = 1000
i = []
j = []
k = []
width_i = []
width_j = []
width_k = []
norm = []
re_product_cube = re.compile("^\s*([0-9.eEdD+-]+)"
+ "\s+([0-9.eEdD+-]+)"
+ "\s+([0-9.eEdD+-]+)"
+ "\s+([0-9]+)"
+ "\s+([0-9]+)"
+ "\s+([0-9]+)"
+ "\s+([0-9.eEdD+-]+)$")
while True:
line = fd.readline(LINE_LENGTH)
if len(line) == 0:
return None
if start.search(line):
matrix_name = line.rstrip()
break
line = fd.readline()
block_size = int(line)
while True:
old_position = fd.tell()
line = fd.readline(LINE_LENGTH)
if len(line) == 0:
break
if end != None:
if end.search(line):
fd.seek(old_position)
break
result = re_product_cube.search(line)
i.append(float(result.group(1)))
j.append(float(result.group(2)))
k.append(float(result.group(3)))
width_i.append(int(result.group(4)))
width_j.append(int(result.group(5)))
width_k.append(int(result.group(6)))
norm.append(float(result.group(7)))
print("loaded {:d} product cubes from {:s}".format(len(i), matrix_name))
return (i, j, k, width_i, width_j, width_k, norm)
@mlab.show
def plot(filename, number_bins=6):
"""Plot the cubes from a file.
The cubes are stratified into number_bins norm bins. The
transparency of the cubes is set depending on which norm bin the
cube is in.
"""
re_matrix_A = re.compile("^\s*Matrix A$")
re_matrix_B = re.compile("^\s*Matrix B$")
re_matrix_C = re.compile("^\s*Matrix C$")
re_product_space = re.compile("^\s*Product Space$")
fd = open(filename)
(block_size, A_i, A_j,
A_width_i, A_width_j, A_norm) = read_squares(fd, re_matrix_A, end=re_matrix_B)
(block_size, B_i, B_j,
B_width_i, B_width_j, B_norm) = read_squares(fd, re_matrix_B, end=re_matrix_C)
(block_size, C_i, C_j,
C_width_i, C_width_j, C_norm) = read_squares(fd, re_matrix_C, end=re_product_space)
(prod_i, prod_j, prod_k,
prod_width_i, prod_width_j, prod_width_k, prod_norm) = read_cubes(fd, re_product_space)
# Get the current figure.
figure = mlab.gcf()
# Get the engine.
engine = mlab.get_engine()
# Clean the figure.
mlab.clf()
# Turn off rendering (for performance).
figure.scene.disable_render = True
# Tune background color.
figure.scene.background = (1., 1., 1.)
# Stratify matrix squares.
(norms_stratified,
A_i_stratified, A_j_stratified,
A_width_i_stratified, A_width_j_stratified) = stratify(number_bins, A_norm,
A_i, A_j, A_width_i, A_width_j)
# Add matrices.
print("Plotting matrix A")
for i in range(number_bins):
if len(A_i_stratified[i]) > 0:
points = mlab.points3d(A_i_stratified[i],
[1 for j in range(len(A_i_stratified[i]))],
A_j_stratified[i],
mode='cube',
color=(0.0, 0.5019607843137255, 0.5019607843137255),
scale_factor=1,
opacity=0.5*(i+1)/float(number_bins))
points.glyph.glyph_source.glyph_source.x_length = block_size
points.glyph.glyph_source.glyph_source.y_length = 0
points.glyph.glyph_source.glyph_source.z_length = block_size
(norms_stratified,
B_i_stratified, B_j_stratified,
B_width_i_stratified, B_width_j_stratified) = stratify(number_bins, B_norm,
B_i, B_j, B_width_i, B_width_j)
# Add matrices.
print("Plotting matrix B")
for i in range(number_bins):
if len(B_i_stratified[i]) > 0:
points = mlab.points3d([1 for j in range(len(B_i_stratified[i]))],
B_j_stratified[i],
B_i_stratified[i],
mode='cube',
color=(0.5019607843137255, 0.0, 0.0),
scale_factor=1,
opacity=0.5*(i+1)/float(number_bins))
points.glyph.glyph_source.glyph_source.x_length = 0
points.glyph.glyph_source.glyph_source.y_length = block_size
points.glyph.glyph_source.glyph_source.z_length = block_size
(norms_stratified,
C_i_stratified, C_j_stratified,
C_width_i_stratified, C_width_j_stratified) = stratify(number_bins, C_norm,
C_i, C_j, C_width_i, C_width_j)
# Add matrices.
print("Plotting matrix C")
for i in range(number_bins):
if len(C_i_stratified[i]) > 0:
points = mlab.points3d(C_i_stratified[i],
C_j_stratified[i],
[1 for j in range(len(C_i_stratified[i]))],
mode='cube',
color=(0.5019607843137255, 0.0, 0.5019607843137255),
scale_factor=1,
opacity=0.5*(i+1)/float(number_bins))
points.glyph.glyph_source.glyph_source.x_length = block_size
points.glyph.glyph_source.glyph_source.y_length = block_size
points.glyph.glyph_source.glyph_source.z_length = 0
# Stratify cubes by norm.
(norms_stratified, prod_i_stratified, prod_j_stratified, prod_k_stratified) = stratify(
number_bins, prod_norm, prod_i, prod_j, prod_k)
# Add cubes.
print("Plotting product cubes")
for i in range(number_bins):
if len(prod_i_stratified[i]) > 0:
points = mlab.points3d(prod_i_stratified[i],
prod_j_stratified[i],
prod_k_stratified[i],
mode='cube',
color=(0.2,.2,.2),
scale_factor=1,
opacity=0.75*(i+1)/float(number_bins))
points.glyph.glyph_source.glyph_source.x_length = block_size
points.glyph.glyph_source.glyph_source.y_length = block_size
points.glyph.glyph_source.glyph_source.z_length = block_size
i_max = max(numpy.amax(prod_i), numpy.amax(prod_j), numpy.amax(prod_k))+block_size/2
print("i_max = {:e}".format(i_max))
# Insert fake invisible data-set for axes.
mlab.points3d([1, i_max], [1, i_max], [1, i_max], mode='cube', scale_factor=0)
#mlab.axes(xlabel="i", ylabel="j", zlabel="k", extent=[1, xmax, 1, xmax, 1, xmax])
# Box around the whole thing.
mlab.outline(extent=[1, i_max, 1, i_max, 1, i_max])
outline = engine.scenes[0].children[-1].children[0].children[1]
outline.actor.property.color = (0, 0, 0)
outline.actor.property.line_width = 2
# Add axes.
from mayavi.modules.axes import Axes
axes = Axes()
engine.add_module(axes, obj=None)
axes.axes.label_format = '%-3.0f'
axes.axes.width = 2
axes.axes.x_label = 'i'
axes.axes.y_label = 'j'
axes.axes.z_label = 'k'
axes.label_text_property.color = (0, 0, 0)
axes.label_text_property.opacity = 0.0
axes.label_text_property.shadow = True
axes.label_text_property.shadow_offset = numpy.array([ 1, -1])
axes.property.color = (0, 0, 0)
axes.property.display_location = 'background'
axes.title_text_property.color = (0, 0, 0)
axes.title_text_property.shadow_offset = numpy.array([ 1, -1])
figure.scene.disable_render = False
figure.scene.camera.compute_view_plane_normal()
import os.path
#-------------------------------------------------------------------------------------------------------
#./spammsand_invsqrt 33_x8_11_S.mm 1.d-1 1.d-3 1.d-1 1.d-1 D U R b=16
# figure.scene.isometric_view()
# png_filename = os.path.splitext(filename)[0] + "_isov.png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename,size=(1024,1024))
# figure.scene.camera.position = [2381.7518163797836, 2526.3678093421449, 2530.13269951962]
# figure.scene.camera.focal_point = [440.00000000000028, 440.0000000000029, 439.99999999999733]
# figure.scene.camera.view_angle = 30.0
# figure.scene.camera.view_up = [-0.4189314063923294, -0.41776697205346547, 0.80620545383879905]
# figure.scene.camera.clipping_range = [1986.7866107311997, 5491.0522577990569]
# figure.scene.camera.compute_view_plane_normal()
# figure.scene.render()
# png_filename = os.path.splitext(filename)[0] + "_cant_x.png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename,size=(1024,1024))
#./spammsand_invsqrt water_500_to_6-311gss.mm 1.d-2 1.d-4 1.d-1 0.d0 D U R
figure.scene.camera.position = [35816.735234550884, 38331.094829602851, 41443.525860211055]
figure.scene.camera.focal_point = [2614.1156973829502, 2621.6382407405645, -241.34477379674968]
figure.scene.camera.view_angle = 30.0
figure.scene.camera.view_up = [-0.45361775222697864, -0.4654155004102597, 0.76001272807921516]
figure.scene.camera.clipping_range = [26313.825398895184, 87716.669164634935]
figure.scene.camera.compute_view_plane_normal()
figure.scene.render()
png_filename = os.path.splitext(filename)[0] + "_cant_x.png"
print("Saving image to " + png_filename)
figure.scene.save(png_filename,size=(768,768))
#./spammsand_invsqrt water_100_to_6-311gss.mm 1.d-1 1.d-3 1.d-1 1.d-1 D U R
# figure.scene.isometric_view()
# png_filename = os.path.splitext(filename)[0] + "_isov.png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename,size=(1024,1024))
# figure.scene.camera.position = [7131.7121897731495, 7525.4214914466402, 8101.2951483680154]
# figure.scene.camera.focal_point = [1702.818579072205, 1686.6399910935772, 1285.485703136407]
# figure.scene.camera.view_angle = 30.0
# figure.scene.camera.view_up = [-0.45361775222697859, -0.46541550041025964, 0.76001272807921505]
# figure.scene.camera.clipping_range = [5042.9256084193876, 17324.211816361931]
# figure.scene.camera.compute_view_plane_normal()
# figure.scene.render()
# png_filename = os.path.splitext(filename)[0] + "_cant_x.png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename,size=(1024,1024))
#-------------------------------------------------------------------------------------------------------
# ./spammsand_invsqrt bcsstk14.mtx 1.d-2 1.d-4 1.d-1 0.d0 D U R
# figure.scene.camera.position = [1045.203726965188, 1039.2064081296085, 6702.5003353789853]
# figure.scene.camera.focal_point = [874.472594413058, 898.76786979832445, 939.79123074155348]
# figure.scene.camera.view_angle = 30.0
# figure.scene.camera.view_up = [-0.70042965936561086, -0.71270269865532587, 0.038120278204521671]
# figure.scene.camera.clipping_range = [3849.5157839671483, 8271.9264727908048]
# figure.scene.camera.compute_view_plane_normal()
# png_filename = os.path.splitext(filename)[0] + "_x_zoomview.png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename)
# figure.scene.camera.position = [2030.6693081026092, 2031.6946128119116, 2101.6583772785889]
# figure.scene.camera.focal_point = [904.5, 904.5, 904.5]
# figure.scene.camera.view_angle = 30.0
# figure.scene.camera.view_up = [-0.4254783969169838, -0.42401748949585194, 0.79948564862578286]
# figure.scene.camera.clipping_range = [5.9413455805998874, 5941.3455805998874]
# figure.scene.camera.compute_view_plane_normal()
# figure.scene.render()
# png_filename = os.path.splitext(filename)[0] + "_y_zoomview.png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename)
#-------------------------------------------------------------------------------------------------------
# Turn rendering back on.
# Save the figure to file.
# import os.path
# png_filename = os.path.splitext(filename)[0] + ".png"
# print("Saving image to " + png_filename)
# figure.scene.save(png_filename)
| {
"repo_name": "FreeON/spammpack",
"path": "spammsand/visualization/plot-2.py",
"copies": "1",
"size": "24103",
"license": "bsd-3-clause",
"hash": -3819621512518133000,
"line_mean": 34.6026587888,
"line_max": 104,
"alpha_frac": 0.4756254408,
"autogenerated": false,
"ratio": 2.9300996839290057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39057251247290053,
"avg_score": null,
"num_lines": null
} |
# Author: Jamie Caesar
# Twitter: @j_cae
#
# This script will grab the detailed CDP information from a Cisco IOS or NX-OS
# device and export it to a CSV file containing the important information, such
# as Remote Device hostname, model and IP information, in addition to the local
# and remote interfaces that connect the devices.
#
# The path where the file is saved is specified in the "savepath" variable in
# the Main() function.
#
# This script is tested on SecureCRT version 7.2 on OSX Mavericks
import os
import datetime
import csv
import re
savepath = 'Dropbox/SecureCRT/Backups/'
mydatestr = '%Y-%m-%d-%H-%M-%S'
def GetHostname(tab):
'''
This function will capture the prompt of the device, by capturing the text
returned after sending a couple line feeds. Because the script will keep
running commands before the commands we send are echo'd back to us, we
have to add some "WaitForString"s so we capture only what we want.
'''
#Send two line feeds
tab.Send("\n\n")
tab.WaitForString("\n") # Waits for first linefeed to be echoed back to us
prompt = tab.ReadString("\n") #Read the text up to the next linefeed.
prompt = prompt.strip() #Remove any trailing control characters
# Check for non-enable mode (prompt ends with ">" instead of "#")
if prompt[-1] == ">":
return None
# Get out of config mode if that is the active mode when the script was launched
elif "(conf" in prompt:
tab.Send("end\n")
hostname = prompt.split("(")[0]
tab.WaitForString(hostname + "#")
# Return the hostname (everything before the first "(")
return hostname
# Else, Return the hostname (all of the prompt except the last character)
else:
return prompt[:-1]
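# Illustrative prompts (hypothetical): "Router1#" returns "Router1";
# "Router1(config)#" sends "end" first and returns "Router1";
# "Router1>" returns None because the session is not in enable mode.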
def short_int(str):
'''
This function shortens the interface name for easier reading
'''
replace_pairs = [
('tengigabitethernet', 'T'),
('gigabitethernet', 'G'),
('fastethernet', 'F'),
('ethernet', 'e'),
('eth', 'e'),
('port-channel' , 'Po')
]
lower_str = str.lower()
for pair in replace_pairs:
if pair[0] in lower_str:
return lower_str.replace(pair[0], pair[1])
# No known long name matched; return the original string unchanged.
return str
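# Example: short_int("GigabitEthernet0/1") lowercases the name and returns
# "G0/1"; a name with no known long form is returned unchanged.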
def short_name(name):
''' This function will remove any domain suffixes (.cisco.com) or serial numbers
that show up in parenthesis after the hostname'''
#TODO: Some devices give IP address instead of name. Need to ignore IP format.
#TODO: Some CatOS devices put hostname in (), instead of serial number. Find a way
# to catch this when it happens.
return name.split('.')[0].split('(')[0]
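# Examples (hypothetical names): short_name("core1.example.com") -> "core1",
# short_name("sw1(FOC1234X56Y)") -> "sw1".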
def CaptureOutput(command, prompt, tab):
'''
This function captures the raw output of the command supplied and returns it.
The prompt variable is used to signal the end of the command output, and
the "tab" variable is object that specifies which tab the commands are
written to.
'''
#Send term length command and wait for prompt to return
tab.Send('term length 0\n')
tab.WaitForString(prompt)
#Send command
tab.Send(command + "\n")
#Ignore the echo of the command we typed
tab.WaitForString(command)
#Capture the output until we get our prompt back and write it to the file
result = tab.ReadString(prompt)
#Send term length back to default
tab.Send('term length 24\n')
tab.WaitForString(prompt)
return result
def WriteFile(raw, filename, suffix = ".txt"):
'''
This function simply writes the contents of the "raw" variable to a
file with the name passed to the function. The file suffix is .txt by
default unless a different suffix is passed in.
'''
newfile = open(filename + suffix, 'wb')
newfile.write(raw)
newfile.close()
def ParseCDP(rawdata):
'''
This function parses the raw "show cdp neighbors detail" output into
a data structure (a list of dictionaries) of only the important information,
which can be more easily used by other functions in the program.
'''
def GetSeparator(raw):
lines = raw.split('\n')
for line in lines:
if "-------" in line:
return line
return None
regex = {
"Device" : re.compile(r"Device ID:.*", re.I),
"IP" : re.compile(r"IP\w* address:.*", re.I),
"Platform" : re.compile(r"Platform:.*,", re.I),
"LocalInt" : re.compile(r"Interface:.*,", re.I),
"RemoteInt" : re.compile(r"Port ID.*:.*", re.I)
}
devData = []
empty = re.compile(r"")
sep = GetSeparator(rawdata)
data_list = rawdata.split(sep)
for chunk in data_list:
devInfo = {}
chunk = chunk.strip()
if len(chunk) > 0:
for name, search in regex.iteritems():
tempsearch = search.findall(chunk)
if len(tempsearch) > 0:
temp = tempsearch[0].split(":")
else:
temp = ['','']
devInfo[name] = temp[1].strip().strip(',')
devData.append(devInfo)
return devData
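# Illustrative result (hypothetical neighbor): one parsed entry might look like
#   {'Device': 'switch2.example.com', 'IP': '10.1.1.2',
#    'Platform': 'cisco WS-C3750X-48', 'LocalInt': 'GigabitEthernet1/0/1',
#    'RemoteInt': 'GigabitEthernet0/48'}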
def CDPtoCSV(data, filename, suffix=".csv"):
'''
This function takes the parsed CDP data and puts it into a CSV file with
the supplied filename. The default suffix is .csv unless a different one
is passed in.
'''
header = ['Local Intf', 'Remote ID', 'Remote Intf', 'IP Address', 'Platform']
newfile = open(filename + suffix, 'wb')
csvOut = csv.writer(newfile)
csvOut.writerow(header)
for device in data:
csvOut.writerow([short_int(device["LocalInt"]), short_name(device["Device"]),
short_int(device["RemoteInt"]), device["IP"], device["Platform"]])
newfile.flush()
newfile.close()
def Main():
'''
The purpose of this program is to capture the CDP information from the connected
switch and output it into a CSV file.
'''
SendCmd = "show cdp neighbors detail"
#Create a "Tab" object, so that all the output goes into the correct Tab.
objTab = crt.GetScriptTab()
tab = objTab.Screen #Allows us to type "tab.xxx" instead of "objTab.Screen.xxx"
tab.Synchronous = True
tab.IgnoreEscape = True
#Get the prompt of the device
hostname = GetHostname(tab)
prompt = hostname + "#"
if hostname == None:
crt.Dialog.MessageBox("Either not in enable mode, or the prompt could not be detected")
else:
now = datetime.datetime.now()
mydate = now.strftime(mydatestr)
#Create Filename
filebits = [hostname, "cdp", mydate]
filename = '-'.join(filebits)
#Create path to save configuration file and open file
fullFileName = os.path.join(os.path.expanduser('~'), savepath + filename)
raw = CaptureOutput(SendCmd, prompt, tab)
cdpInfo = ParseCDP(raw)
CDPtoCSV(cdpInfo, fullFileName)
tab.Synchronous = False
tab.IgnoreEscape = False
Main()
| {
"repo_name": "DN0000/SecureCRT",
"path": "SaveCDPtoCSV.py",
"copies": "1",
"size": "7033",
"license": "apache-2.0",
"hash": -5067805128009831000,
"line_mean": 32.0187793427,
"line_max": 95,
"alpha_frac": 0.6323048486,
"autogenerated": false,
"ratio": 3.8347873500545258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4967092198654526,
"avg_score": null,
"num_lines": null
} |
# Author: Jamie Caesar
# Twitter: @j_cae
#
# This script will grab the route table information from a Cisco IOS device
# and export some statistics to a CSV file.
#
# The path where the file is saved is specified in the "savepath" variable in
# the Main() function.
#
# This script is tested on SecureCRT version 7.2 on OSX Mavericks
import os
import datetime
import csv
import re
import pickle
import sys
savepath = 'Dropbox/SecureCRT/Backups/'
mydatestr = '%Y-%m-%d-%H-%M-%S'
def GetHostname(tab):
'''
This function will capture the prompt of the device, by capturing the text
returned after sending a couple line feeds. Because the script will keep
running commands before the commands we send are echo'd back to us, we
have to add some "WaitForString"s so we capture only what we want.
'''
#Send two line feeds
tab.Send("\n\n")
tab.WaitForString("\n") # Waits for first linefeed to be echoed back to us
prompt = tab.ReadString("\n") #Read the text up to the next linefeed.
prompt = prompt.strip() #Remove any trailing control characters
# Check for non-enable mode (prompt ends with ">" instead of "#")
if prompt[-1] == ">":
return None
# Get out of config mode if that is the active mode when the script was launched
elif "(conf" in prompt:
tab.Send("end\n")
hostname = prompt.split("(")[0]
tab.WaitForString(hostname + "#")
# Return the hostname (everything before the first "(")
return hostname
# Else, Return the hostname (all of the prompt except the last character)
else:
return prompt[:-1]
def short_int(str):
'''
This function shortens the interface name for easier reading
'''
replace_pairs = [
('tengigabitethernet', 'T'),
('gigabitethernet', 'G'),
('fastethernet', 'F'),
('ethernet', 'e'),
('eth', 'e'),
('port-channel' , 'Po')
]
lower_str = str.lower()
for pair in replace_pairs:
if pair[0] in lower_str:
return lower_str.replace(pair[0], pair[1])
# No known long name matched; return the original string unchanged.
return str
def CaptureOutput(command, prompt, tab):
'''
This function captures the raw output of the command supplied and returns it.
The prompt variable is used to signal the end of the command output, and
the "tab" variable is object that specifies which tab the commands are
written to.
'''
#Send term length command and wait for prompt to return
tab.Send('term length 0\n')
tab.WaitForString(prompt)
#Send command
tab.Send(command + "\n")
#Ignore the echo of the command we typed
tab.WaitForString(command)
#Capture the output until we get our prompt back and write it to the file
result = tab.ReadString(prompt)
#Send term length back to default
tab.Send('term length 24\n')
tab.WaitForString(prompt)
return result
def ParseRawRoutes(routelist):
DEBUG = False
routetable = []
# Various RegEx expressions to match varying parts of a route table line
# I did it this way to break up the regex into more manageable parts,
# plus some of these parts can be found in multiple line types.
# I'm also using named groups to more easily extract the needed data.
#
# Protocol (letter code identifying route entry)
re_prot= r'(?P<protocol>\w[\* ][\w]{0,2})[ ]+'
# Matches network address of route: x.x.x.x/yy
re_net = r'(?P<network>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(/\d+)?)[ ]+'
# Matches the Metric and AD: i.e. [110/203213]
re_metric = r'\[(?P<ad>\d+)/(?P<metric>\d+)\][ ]+'
# Matches the next hop in the route statement - "via y.y.y.y"
re_nexthop = r'via (?P<nexthop>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}),?[ ]*'
# Matches the lifetime of the route, usually in a format like 2m3d. Optional
re_lifetime = r'(?P<lifetime>\w+)?(, )?'
# Matches outgoing interface. Not all protocols track this, so it is optional
re_interface = r'(?P<interface>\w+(/\d)*)?'
# Combining expressions above to build possible lines found in the route table
#
# Single line route entry
re_single = re_prot + re_net + re_metric + re_nexthop + re_lifetime + re_interface
# Directly connected route
re_connected = re_prot + re_net + 'is directly connected, ' + re_interface
# When the route length exceeds 80 chars, it is split across lines. This is
# the first line -- just the protocol and network.
re_multiline = re_prot + re_net
# This is the format seen for either a second ECMP path, or when the route has
# been broken up across lines because of the length.
re_ecmp = r'[ ]*' + re_metric + re_nexthop + re_lifetime + re_interface
#Compile RegEx expressions
reSingle = re.compile(re_single)
reConnected = re.compile(re_connected)
reMultiline = re.compile(re_multiline)
reECMP = re.compile(re_ecmp)
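# Hypothetical OSPF line of the kind reSingle is built to match:
#   "O    10.1.1.0/24 [110/20] via 192.168.1.1, 2d03h, GigabitEthernet0/1"
# which yields network '10.1.1.0/24', AD '110', metric '20', next hop
# '192.168.1.1', lifetime '2d03h' and interface 'GigabitEthernet0/1'.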
# Start parsing raw route table into a data structure. Each route entry goes
# into a dict, and all the entries are collected into a list.
for entry in routelist:
routeentry = {}
regex = reSingle.match(entry)
if regex:
# Need to track protocol and network in case the next line is a 2nd
# equal cost path (which doesn't show that info)
prev_prot = regex.group('protocol')
prev_net = regex.group('network')
routeentry = { "protocol" : prev_prot,
"network" : prev_net,
"AD" : regex.group('ad'),
"metric" : regex.group('metric'),
"nexthop" : regex.group('nexthop'),
"lifetime" : regex.group('lifetime'),
"interface" : regex.group('interface')
}
else:
regex = reConnected.match(entry)
if regex:
routeentry = { "protocol" : regex.group('protocol'),
"network" : regex.group('network'),
"AD" : 0,
"metric" : 0,
"nexthop" : None,
"interface" : regex.group('interface')
}
else:
regex = reMultiline.match(entry)
if regex:
# Since this is the first line in an entry that was broken
# up due to length, only record protocol and network.
# The next line has the rest of the data needed.
prev_prot = regex.group('protocol')
prev_net = regex.group('network')
else:
regex = reECMP.match(entry)
if regex:
# Since this is a second equal cost entry, use
# protocol and network info from previous entry
routeentry = { "protocol" : prev_prot,
"network" : prev_net,
"AD" : regex.group('ad'),
"metric" : regex.group('metric'),
"nexthop" : regex.group('nexthop'),
"lifetime" : regex.group('lifetime'),
"interface" : regex.group('interface')
}
else:
if DEBUG:
print "Skipping: " + entry
if routeentry != {}:
routetable.append(routeentry)
return routetable
def NextHopSummary(routelist):
summaryDict = {}
for entry in routelist:
if entry['nexthop']:
if entry['nexthop'] in summaryDict:
summaryDict[entry['nexthop']] += 1
else:
summaryDict[entry['nexthop']] = 1
nexthops = [['Next-hop', '# of routes']]
for key, value in summaryDict.iteritems():
nexthops.append([key, value])
return nexthops
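# Example of the summary structure (hypothetical routes): three routes via
# 10.1.1.1 and one via 10.2.2.2 come back as
#   [['Next-hop', '# of routes'], ['10.1.1.1', 3], ['10.2.2.2', 1]]
# (dict iteration order, so the data rows may appear in any order).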
def ListToCSV(data, filename, suffix=".csv"):
'''
This function takes a list and puts it into a CSV file with the supplied
filename. The default suffix is .csv unless a different one is passed in.
'''
newfile = open(filename + suffix, 'wb')
csvOut = csv.writer(newfile)
for line in data:
csvOut.writerow(line)
newfile.close()
def Main():
'''
The purpose of this program is to capture the route table from the connected
device and output a summary of next hops to a CSV file.
'''
SendCmd = "show ip route"
#Create a "Tab" object, so that all the output goes into the correct Tab.
objTab = crt.GetScriptTab()
tab = objTab.Screen #Allows us to type "tab.xxx" instead of "objTab.Screen.xxx"
tab.Synchronous = True
tab.IgnoreEscape = True
#Get the prompt of the device
hostname = GetHostname(tab)
prompt = hostname + "#"
if hostname == None:
crt.Dialog.MessageBox("Either not in enable mode, or the prompt could not be detected")
else:
now = datetime.datetime.now()
mydate = now.strftime(mydatestr)
#Create Filename
filebits = [hostname, "nexthops", mydate]
filename = '-'.join(filebits)
#Create path to save configuration file and open file
fullFileName = os.path.join(os.path.expanduser('~'), savepath + filename)
raw = CaptureOutput(SendCmd, prompt, tab)
routes = raw.split('\r\n')
routelist = ParseRawRoutes(routes)
summary = NextHopSummary(routelist)
ListToCSV(summary, fullFileName)
tab.Synchronous = False
tab.IgnoreEscape = False
Main()
| {
"repo_name": "DN0000/SecureCRT",
"path": "GetNextHops.py",
"copies": "1",
"size": "9844",
"license": "apache-2.0",
"hash": -5054337613390694000,
"line_mean": 36.2878787879,
"line_max": 95,
"alpha_frac": 0.5777123121,
"autogenerated": false,
"ratio": 4.0660883932259395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01456517079307472,
"num_lines": 264
} |
# Author: Jamie Caesar
# Twitter: @j_cae
#
# This SecureCRT script will prompt the user for a command to a Cisco IOS or NX-OS
# device and dump the output to a file. The path where the file is saved is
# specified in the "savepath" variable in the Main() function.
#
# This script is tested on SecureCRT version 7.2 on OSX Mavericks
#
import os
import datetime
savepath = 'Dropbox/SecureCRT/Backups/'
mydatestr = '%Y-%m-%d-%H-%M-%S'
def GetHostname(tab):
'''
This function will capture the prompt of the device. The script will capture the
text that is sent back from the remote device, which includes what we typed being
echoed back to us, so we have to account for that while we parse data.
'''
#Send two line feeds
tab.Send("\n\n")
tab.WaitForString("\n") # Waits for first linefeed to be echoed back to us
prompt = tab.ReadString("\n") #Read the text up to the next linefeed.
prompt = prompt.strip() #Remove any trailing control characters
# Check for non-enable mode (prompt ends with ">" instead of "#")
if prompt[-1] == ">":
return None
# Get out of config mode if that is the active mode when the script was launched
elif "(conf" in prompt:
tab.Send("end\n")
hostname = prompt.split("(")[0]
tab.WaitForString(hostname + "#")
# Return the hostname (everything before the first "(")
return hostname
# Else, Return the hostname (all of the prompt except the last character)
else:
return prompt[:-1]
def CaptureOutput(command, prompt, tab):
'''
This function captures the raw output of the command supplied and returns it.
The prompt variable is used to signal the end of the command output, and
the "tab" variable is object that specifies which tab the commands are
written to.
'''
#Send term length command and wait for prompt to return
tab.Send('term length 0\n')
tab.WaitForString(prompt)
#Send command
tab.Send(command)
#Ignore the echo of the command we typed
tab.WaitForString(command.strip())
#Capture the output until we get our prompt back and write it to the file
result = tab.ReadString(prompt)
#Send term length back to default
tab.Send('term length 24\n')
tab.WaitForString(prompt)
return result
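# Example usage (hypothetical command and prompt):
#   raw = CaptureOutput("show version\r\n", "Router1#", tab)
# returns everything printed between the echoed command and the next prompt.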
def WriteFile(raw, filename):
'''
    This function simply writes the contents of the "raw" variable to a
file with the name passed to the function. The file suffix is .txt by
default unless a different suffix is passed in.
'''
newfile = open(filename, 'wb')
newfile.write(raw)
newfile.close()
def Main():
'''
    The purpose of this program is to capture the output of the command entered by the
user and save it to a file. This method is much faster than manually setting a log
file, or trying to extract only the information needed from the saved log file.
'''
SendCmd = crt.Dialog.Prompt("Enter the command to capture")
if SendCmd == "":
return
else:
# Save command without spaces to use in output filename.
CmdName = SendCmd.replace(" ", "_")
# Add a newline to command before sending it to the remote device.
SendCmd = SendCmd + "\r\n"
#Create a "Tab" object, so that all the output goes into the correct Tab.
objTab = crt.GetScriptTab()
tab = objTab.Screen #Allows us to type "tab.xxx" instead of "objTab.Screen.xxx"
tab.Synchronous = True
tab.IgnoreEscape = True
#Get the prompt of the device
hostname = GetHostname(tab)
    if hostname is None:
crt.Dialog.MessageBox("You must be in enable mode to run this script.")
else:
prompt = hostname + "#"
now = datetime.datetime.now()
mydate = now.strftime(mydatestr)
#Create Filename
filebits = [hostname, CmdName, mydate + ".txt"]
filename = '-'.join(filebits)
#Create path to save configuration file and open file
fullFileName = os.path.join(os.path.expanduser('~'), savepath + filename)
WriteFile(CaptureOutput(SendCmd, prompt, tab), fullFileName)
tab.Synchronous = False
tab.IgnoreEscape = False
Main()
| {
"repo_name": "DN0000/SecureCRT",
"path": "SaveOutput.py",
"copies": "1",
"size": "4300",
"license": "apache-2.0",
"hash": -4229957184237279700,
"line_mean": 32.8582677165,
"line_max": 87,
"alpha_frac": 0.6595348837,
"autogenerated": false,
"ratio": 3.9741219963031424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133656880003142,
"avg_score": null,
"num_lines": null
} |
#$language = "python"
#$interface = "1.0"
# Document_Device.py
#
# Description:
# Sends a series of Cisco Show commands one by one as listed in the
# COMMANDS array. The results of each command are captured into a
# variable, and then written to an individual log file (one log file
# for each command).
#
# Filename format is:
# ~/$savepath/<Host Name>-<Command Name>-<Date Format>.txt
import os
import subprocess
import datetime
import sys
# Adjust these to your environment
savepath = 'Configs/'
mydatestr = '%Y-%m-%d-%H-%M-%S'
COMMANDS = [
"show access-list",
"show call active voice brief",
"show call history voice brief",
"show cdp neighbors detail",
"show cdp neighbors",
"show clock",
"show controllers E1",
"show controllers T1",
"show crypto ipsec sa",
"show crypto isakmp sa",
"show crypto map",
"show debug",
"show dial-peer voice summary",
"show environment power"
"show etherchannel summary",
"show interface counters error",
"show interface description",
"show interface stats",
"show interface status",
"show interface summary",
"show interface transceiver detail",
"show interface transceiver",
"show interfaces",
"show inventory",
"show ip arp",
"show ip eigrp neighbor",
"show ip interface brief",
"show ip ospf neighbor",
"show ip protocols",
"show ip route 0.0.0.0",
"show ip route",
"show ipv6 interface brief",
"show ipv6 protocols",
"show ipv6 protocols",
"show ipv6 route",
"show log",
"show mac address-table dynamic",
"show mac address-table",
"show module",
"show policy-map interface"
"show policy-map",
"show port-channel summary",
"show power",
"show route-map",
"show running",
"show spanning-tree",
"show version",
"show vtp status",
"write"
]
def GetHostname(tab):
'''
This function will capture the prompt of the device. The script will capture the
text that is sent back from the remote device, which includes what we typed being
echoed back to us, so we have to account for that while we parse data.
'''
#Send two line feeds
tab.Send("\n\n")
tab.WaitForString("\n") # Waits for first linefeed to be echoed back to us
prompt = tab.ReadString("\n") #Read the text up to the next linefeed.
prompt = prompt.strip() #Remove any trailing control characters
# Check for non-enable mode (prompt ends with ">" instead of "#")
if prompt[-1] == ">":
return None
# Get out of config mode if that is the active mode when the script was launched
elif "(conf" in prompt:
tab.Send("end\n")
hostname = prompt.split("(")[0]
tab.WaitForString(hostname + "#")
# Return the hostname (everything before the first "(")
return hostname
# Else, Return the hostname (all of the prompt except the last character)
else:
return prompt[:-1]
def CaptureOutput(command, prompt, tab):
'''
This function captures the raw output of the command supplied and returns it.
The prompt variable is used to signal the end of the command output, and
the "tab" variable is object that specifies which tab the commands are
written to.
'''
#Send command
tab.Send(command)
#Ignore the echo of the command we typed
tab.WaitForString(command.strip())
#Capture the output until we get our prompt back and write it to the file
result = tab.ReadString(prompt)
return result
def WriteFile(raw, filename):
'''
    This function simply writes the contents of the "raw" variable to a
file with the name passed to the function. The file suffix is .txt by
default unless a different suffix is passed in.
'''
newfile = open(filename, 'wb')
newfile.write(raw)
newfile.close()
def main():
#Create a "Tab" object, so that all the output goes into the correct Tab.
objTab = crt.GetScriptTab()
tab = objTab.Screen #Allows us to type "tab.xxx" instead of "objTab.Screen.xxx"
tab.IgnoreEscape = True
tab.Synchronous = True
#Get the prompt of the device
hostname = GetHostname(tab)
    if hostname is None:
crt.Dialog.MessageBox("You must be in enable mode to run this script.")
else:
prompt = hostname + "#"
now = datetime.datetime.now()
mydate = now.strftime(mydatestr)
#Send term length command and wait for prompt to return
tab.Send('term length 0\n')
tab.Send('term width 0\n')
tab.WaitForString(prompt)
for (index, SendCmd) in enumerate(COMMANDS):
SendCmd = SendCmd.strip()
# Save command without spaces to use in output filename.
CmdName = SendCmd.replace(" ", "_")
# Add a newline to command before sending it to the remote device.
SendCmd = SendCmd + "\n"
#Create Filename
filebits = [hostname, CmdName, mydate + ".txt"]
filename = '-'.join(filebits)
#Create path to save configuration file and open file
fullFileName = os.path.join(os.path.expanduser('~'), savepath + filename)
CmdResult = CaptureOutput(SendCmd, prompt, tab)
if "% Invalid input" not in CmdResult:
WriteFile(CmdResult, fullFileName)
CmdResult = ''
#Send term length back to default
tab.Send('term length 24\n')
tab.Send('term width 80\n')
tab.WaitForString(prompt)
tab.Synchronous = False
tab.IgnoreEscape = False
crt.Dialog.MessageBox("Device Documentation Script Complete", "Script Complete", 0)
main()
| {
"repo_name": "DN0000/SecureCRT",
"path": "Document_Device.py",
"copies": "1",
"size": "5211",
"license": "apache-2.0",
"hash": -9112977324417495000,
"line_mean": 27.3206521739,
"line_max": 84,
"alpha_frac": 0.7031279985,
"autogenerated": false,
"ratio": 3.3489717223650386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4552099720865038,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABFs01,GCCLABFs02
import os
import csv
import re
def main():
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
fileobj = open('vlanbrief7k.csv', 'wb')
intdesc = open('intdesc7k.csv','rb')
worksheet = csv.writer(fileobj)
intdescws = csv.reader(intdesc)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show vlan brief\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
write = True
portid = ['']
while result != 2:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Fetch current row and read characters from the screen on that row.
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line on whitespace and put some fields into Excel
items = readline.split()
# Write the vlan id and name on the document,
# Then, write each vlan port and description that appears for the vlan
if items!=[] and re.match(r'\d+',items[0]):
worksheet.writerow(items[0:2])
intdesc.seek(0)
items = items[3:]
# Write each vlan port and description that appears for the vlan
for item in items:
if re.match(r'(Eth\d+|Po\d+)',item):
while portid[0] != item.replace(',',''):
portid = next(intdescws)
worksheet.writerow([item.replace(',',''), portid[1]])
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
fileobj.close()
intdesc.close()
crt.Screen.Synchronous = False
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "showvlanbrief_7k.py",
"copies": "1",
"size": "1721",
"license": "artistic-2.0",
"hash": -5224165668528648000,
"line_mean": 25.0757575758,
"line_max": 72,
"alpha_frac": 0.6786751888,
"autogenerated": false,
"ratio": 3.1636029411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8212131851721756,
"avg_score": 0.22602925565094278,
"num_lines": 66
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABFs01,GCCLABFs02
import os
import csv
#import re
def main():
crt.Screen.Synchronous = True
vid = crt.Dialog.Prompt("Enter vlan id ", "Vlan", "", False)
# Create an Excel compatible spreadsheet
fileobj = open('vlanid'+vid+'_7k.csv', 'ab')
worksheet = csv.writer(fileobj)
intdesc = open('intdesc7k.csv','rb')
intdescws = csv.reader(intdesc)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show vlan id "+vid+"\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
write = True
portid = ['']
while result != 2:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Fetch current row and read characters from the screen on that row.
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line on whitespace and put some fields into Excel
items = readline.split()
# Write the vlan id and name on the document
if items!=[] and items[0] == vid and write:
worksheet.writerow(items[0:2])
write = False
# Write each vlan port and description that appears for the vlan
for item in items:
if item[0:3] == "Eth": # re.match(r'(Eth\d+|Po\d+)',item):
while portid[0] != item.replace(',',''):
portid = next(intdescws)
worksheet.writerow([item.replace(',',''), portid[1]])
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
fileobj.close()
intdesc.close()
crt.Screen.Synchronous = False
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "showvlanidvid_7k.py",
"copies": "1",
"size": "1718",
"license": "artistic-2.0",
"hash": -5789295401713680000,
"line_mean": 25.4307692308,
"line_max": 71,
"alpha_frac": 0.6682188591,
"autogenerated": false,
"ratio": 3.1236363636363635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4291855222736363,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABFs01,GCCLABFs02
import os
import csv
#import re
def vlans(vid):
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
fileobj = open('allvlans7k.csv','ab')
intdesc = open('intdesc7k.csv','rb')
worksheet = csv.writer(fileobj)
intdescws = csv.reader(intdesc)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show vlan id "+vid+"\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
write = True
portid = ['']
while result != 2:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Fetch current row and read characters from the screen on that row.
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line on whitespace and put some fields into Excel
items = readline.split()
# Write the vlan id and name on the document
if items!=[] and items[0] == vid and write:
worksheet.writerow(items[0:2])
write = False
# Write each vlan port and description that appears for the vlan
for item in items:
if item[0:3] == "Eth": # re.match(r'(Eth\d+|Po\d+)',item):
while portid[0] != item.replace(',',''):
portid = next(intdescws)
worksheet.writerow([item.replace(',',''), portid[1]])
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
fileobj.close()
intdesc.close()
crt.Screen.Synchronous = False
def main():
vidf = open('vlans7k.csv','rb')
for i in vidf:
vlans(i.split(',')[0])
vidf.close()
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "allvlanids_7k.py",
"copies": "1",
"size": "1756",
"license": "artistic-2.0",
"hash": 6800028602247554000,
"line_mean": 23.7323943662,
"line_max": 71,
"alpha_frac": 0.666856492,
"autogenerated": false,
"ratio": 3.059233449477352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.807687330554075,
"avg_score": 0.22984332718732048,
"num_lines": 71
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABFs01,GCCLABFs02
import os
import csv
def main():
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
filename = crt.Dialog.Prompt("Enter file name to write to: ", "Show Interface Description", "intdesc7k.csv", False)
fileobj = open(filename, 'wb')
worksheet = csv.writer(fileobj)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","GCCLABFs01#","GCCLABFs02#"]
result = crt.Screen.WaitForStrings( waitStrs )
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
        # If we see a prompt, we're done
if result == 2:
break
# Fetch current row and read the characters from the screen
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line and put some fields into Excel
if readline[0:3] == "Eth":
items = [readline[0:14].strip(),readline[29:140].strip()]
worksheet.writerow(items)
fileobj.close()
crt.Screen.Synchronous = False
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "shintdesc_7k.py",
"copies": "1",
"size": "1327",
"license": "artistic-2.0",
"hash": -6550734718966654000,
"line_mean": 24.5192307692,
"line_max": 116,
"alpha_frac": 0.6963074604,
"autogenerated": false,
"ratio": 3.137115839243499,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4333423299643499,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABVM1,GCCLABVM2
import os
import csv
import re
def main():
crt.Screen.Synchronous = True
vid = crt.Dialog.Prompt("Enter vlan id ", "Vlan", "", False)
# Create an Excel compatible spreadsheet
fileobj = open('vlanid'+vid+'.csv', 'wb')
worksheet = csv.writer(fileobj)
intdesc = open("intdesc.csv",'rb')
intdescws = csv.reader(intdesc)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show vlan id "+vid+"\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
write = True
portid = ['']
while result != 2:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Fetch current row and read characters from the screen on that row.
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line on whitespace and put some fields into Excel
items = readline.split()
# Write the vlan id and name on the document
if items!=[] and items[0] == vid and write:
worksheet.writerow(items[0:2])
write = False
# Write each vlan port and description that appears for the vlan
for item in items:
if re.match(r'(Gi\d+)',item):
while portid[0] != item.replace(',',''):
portid = next(intdescws)
worksheet.writerow([item.replace(',',''), portid[1]])
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
fileobj.close()
intdesc.close()
crt.Screen.Synchronous = False
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "showvlanidvid_legacy.py",
"copies": "1",
"size": "1679",
"license": "artistic-2.0",
"hash": -7051099530736382000,
"line_mean": 25.234375,
"line_max": 71,
"alpha_frac": 0.6724240619,
"autogenerated": false,
"ratio": 3.18595825426945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43583823161694496,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABVM1,GCCLABVM2
import os
import csv
def main():
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
filename = crt.Dialog.Prompt("Enter file name to write to: ", "Show Interface Description", "intdesc.csv", False)
fileobj = open(filename, 'wb')
worksheet = csv.writer(fileobj)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show interface description\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
        # If we see a prompt, we're done
if result == 2:
break
# Fetch current row and read the characters from the screen
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# if readline[0:2] == "Vl":
# break
# Split the line and put some fields into Excel
items = [readline[0:31].strip(),readline[55:140].strip()]
worksheet.writerow(items)
fileobj.close()
#crt.Dialog.MessageBox("Done! Your file is saved in "+filename)
crt.Screen.Synchronous = False
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "shintdesc_legacy.py",
"copies": "1",
"size": "1392",
"license": "artistic-2.0",
"hash": -2626914138821538000,
"line_mean": 23.8571428571,
"line_max": 114,
"alpha_frac": 0.694683908,
"autogenerated": false,
"ratio": 3.185354691075515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4380038599075515,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
# for GCCLABVM1,GCCLABVM2
import os
import csv
def vlans(vid):
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
filename = 'allvlans.csv'
fileobj = open(filename, 'ab')
intdesc = open('intdesc.csv','rb')
worksheet = csv.writer(fileobj)
intdescws = csv.reader(intdesc)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show vlan id "+vid+"\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
write = True
portid = ['']
while result != 2:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Fetch current row and read characters from the screen on that row.
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line on whitespace and put some fields into Excel
items = readline.split()
# Write the vlan id and name on the document
if items!=[] and items[0] == vid and write:
worksheet.writerow(items[0:2])
write = False
# Write each vlan port and description that appears for the vlan
for item in items:
if item[0:2] == "Gi":
while portid[0] != item.replace(',',''):
portid = next(intdescws)
worksheet.writerow([item.replace(',',''), portid[1]])
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
fileobj.close()
intdesc.close()
crt.Screen.Synchronous = False
def main():
vidf = open('vlans.txt','rb')
for i in vidf:
vlans(i.strip('\n'))
vidf.close()
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "allvlanids_legacy.py",
"copies": "1",
"size": "1753",
"license": "artistic-2.0",
"hash": 6160089622922742000,
"line_mean": 23.3472222222,
"line_max": 71,
"alpha_frac": 0.6697090702,
"autogenerated": false,
"ratio": 3.119217081850534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8157444943936889,
"avg_score": 0.22629624162272913,
"num_lines": 72
} |
# $language = "python"
# $interface = "1.0"
# for GCCSFRMs01-06, SCCSFRMs01-12
import os
import csv
import re
import sys
import datetime
import msvcrt
int_total10g = 0
int_total1g = 0
def connect(str_hostname, str_username, str_password, csvws_10g, csvws_1g, csvws_pair):
"""
    (str, str, str, csv.writer, csv.writer, csv.writer) -> None
Connects to str_hostname with credentials str_username and str_password, calls 'sh int desc',
    and lists all available 10G ports on the csvws_10g document and 1G ports on the csvws_1g document,
with both numbers of ports on each line per host on the csvws_pair spreadsheet,
as well as recording the total number of available ports on each spreadsheet
"""
# Declaring global variables to count total ports
global int_total10g
global int_total1g
    # Resetting local variables that count available ports for this host
int_port10g = 0
int_port1g = 0
# Connect to the session
str_cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (str_username, str_password, str_hostname+".rns.fg.rbc.com")
crt.Session.Connect(str_cmd)
crt.Screen.Synchronous = True
# Write the hostname on the documents
csvws_10g.writerow([str_hostname])
csvws_1g.writerow([str_hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
list_str_waitfor = ["\n", str_hostname+"#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
        # prompt that indicates we're done.
int_result = crt.Screen.WaitForStrings( list_str_waitfor )
        # If we see a prompt, then we're done
if int_result == 2:
break
# Fetch current row and read the characters from the screen
int_screenrow = crt.Screen.CurrentRow - 1
str_readline = crt.Screen.Get(int_screenrow, 1, int_screenrow, 140)
# Split the line by whitespace, and only take the port name and its description
list_str_items = re.split(r'\s{2,}',str_readline.strip())
str_port = list_str_items[0]
try:
str_desc = list_str_items[3]
except IndexError:
str_desc = ''
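        # For illustration, a hypothetical screen line such as
        #   "Eth99/1    eth    10G    V, available"
        # splits into ['Eth99/1', 'eth', '10G', 'V, available'], giving
        # str_port = 'Eth99/1' and str_desc = 'V, available'.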
# Match port names to differentiate between 10g and 1g ports
# Write the name of the port and its description to the document
if re.match(r'Eth\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_10g.writerow([str_port,str_desc])
int_total10g += 1
int_port10g += 1
elif re.match(r'Eth1\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_1g.writerow([str_port,str_desc])
int_total1g += 1
int_port1g += 1
# Write the number of available ports on the document
csvws_10g.writerow([str(int_port10g)+" ports available for "+str_hostname])
csvws_1g.writerow([str(int_port1g)+" ports available for "+str_hostname])
csvws_10g.writerow([])
csvws_1g.writerow([])
csvws_pair.writerow([str_hostname, str(int_port10g), str(int_port1g)])
# Disconnect from the session
crt.Screen.Synchronous = False
crt.Session.Disconnect()
def main():
# Get login information
str_usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "", False)
if str_usr == "": return
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if str_pwd == "": return
# Get today's date (in ISO 8601 format: yyyy-mm-dd) and current time (24-hour format)
    str_nowfilename = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
str_nowtext = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# Create csv worksheets to record to
file_10g = open("intdesc_both_10g_%s.csv"%(str_nowfilename), 'wb')
csvws_10g = csv.writer(file_10g)
file_1g = open("intdesc_both_1g_%s.csv"%(str_nowfilename), 'wb')
csvws_1g = csv.writer(file_1g)
file_pair = open("ports_per_pair_%s.csv"%(str_nowfilename), 'wb')
csvws_pair = csv.writer(file_pair)
# Writing header lines, indicating the date of report
file_10g.write("Available 10G ports as of %s\n" % (str_nowtext))
file_1g.write("Available 1G ports as of %s\n" % (str_nowtext))
file_pair.write("Available ports as of %s\n" % (str_nowtext))
csvws_pair.writerow(["Hostname", "10G ports available", "1G ports available"])
# Iterate through each of the hosts, recording the available ports
for i in range(1,7):
connect("GCCSFRMs"+str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
for i in range(1,13):
connect("SCCSFRMs"+str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
    # Write total number of available ports on the documents
file_10g.write("%s total available ports as of %s" % (str(int_total10g), str_nowtext))
file_1g.write("%s total available ports as of %s" % (str(int_total1g), str_nowtext))
csvws_pair.writerow(["Total", str(int_total10g), str(int_total1g)])
# Close the documents
file_10g.close()
file_1g.close()
file_pair.close()
# Display total port availability in a dialog box
str_message = "Results:\n--------\n10G: %d available ports\n1G: %d available ports\n\nRetrieved on: %s" % (int_total10g, int_total1g, str_nowtext)
crt.Dialog.MessageBox(str_message, "Final Report", 64)
if __name__ == '__builtin__':
main()
elif __name__ == '__main__':
print "This program must be run in SecureCRT"
print "Open SecureCRT, go to Script > Run... , then select this file"
print "(%s)" % (sys.argv[0])
print
print "Press any key to exit",
msvcrt.getch()
else:
pass
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "sfrms_iso8601.py",
"copies": "1",
"size": "5485",
"license": "artistic-2.0",
"hash": 6318878909526014000,
"line_mean": 35.5666666667,
"line_max": 147,
"alpha_frac": 0.6958979034,
"autogenerated": false,
"ratio": 2.844917012448133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8212623097664915,
"avg_score": 0.16563836363664344,
"num_lines": 150
} |
# $language = "python"
# $interface = "1.0"
# for GCCSFRMs01-06, SCCSFRMs01-12
import os
import csv
import re
import sys
import datetime
import msvcrt
# Port count for GCC or SCC servers
int_xcc_10g = 0
int_xcc_1g = 0
# Port count for all servers
int_total10g = 0
int_total1g = 0
def connect(str_hostname, str_username, str_password, csvws_10gports, csvws_1gports, csvws_portsperpair):
"""
(str, str, str, csv.writer, csv.writer, csv.writer) -> None
Connects to str_hostname with credentials str_username and str_password, calls 'sh int desc',
    and lists all available 10G ports on the csvws_10gports document and 1G ports on the csvws_1gports document,
with both numbers of ports on each line per host on the csvws_portsperpair spreadsheet,
as well as recording the total number of available ports on each spreadsheet
"""
# Declaring global variables to count total ports
global int_total10g
global int_total1g
global int_xcc_10g
global int_xcc_1g
    # Resetting local variables that count available ports for this host
int_port10g = 0
int_port1g = 0
# Connect to the session
str_cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (str_username, str_password, str_hostname + ".rns.fg.rbc.com")
crt.Session.Connect(str_cmd)
crt.Screen.Synchronous = True
# Write the hostname on the documents
csvws_10gports.writerow([str_hostname])
csvws_1gports.writerow([str_hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
list_str_waitfor = ["\n", str_hostname + "#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
        # prompt that indicates we're done.
int_result = crt.Screen.WaitForStrings(list_str_waitfor)
        # If we see a prompt, then we're done
if int_result == 2:
break
# Fetch current row and read the characters from the screen
int_screenrow = crt.Screen.CurrentRow - 1
str_readline = crt.Screen.Get(int_screenrow, 1, int_screenrow, 140)
# Split the line by whitespace, and only take the port name and its description
list_str_items = re.split(r"\s{2,}", str_readline.strip())
str_port = list_str_items[0]
try:
str_desc = list_str_items[3]
except IndexError:
str_desc = ""
# Match port names to differentiate between 10g and 1g ports
# Write the name of the port and its description to the document
if re.match(r"Eth\d{2}/", str_port) and re.match(r"V,", str_desc, flags=re.IGNORECASE):
csvws_10gports.writerow([str_port, str_desc])
int_total10g += 1
int_xcc_10g += 1
int_port10g += 1
elif re.match(r"Eth1\d{2}/", str_port) and re.match(r"V,", str_desc, flags=re.IGNORECASE):
csvws_1gports.writerow([str_port, str_desc])
int_total1g += 1
            int_xcc_1g += 1
int_port1g += 1
# Write the number of available ports on the document
csvws_10gports.writerow(["Available ports for %s: %d" % (str_hostname, int_port10g)])
csvws_1gports.writerow(["Available ports for %s: %d" % (str_hostname, int_port1g)])
csvws_10gports.writerow([])
csvws_1gports.writerow([])
csvws_portsperpair.writerow([str_hostname, str(int_port10g), str(int_port1g)])
# Disconnect from the session
crt.Screen.Synchronous = False
crt.Session.Disconnect()
def main():
"""
() -> None
    Main function; creates the necessary csv files to be written to and writes the
    appropriate data to them; connects to each of the server farm hosts sequentially,
    recording the available ports to the csv files
"""
global int_xcc_10g
global int_xcc_1g
# Get today's date (in ISO 8601 format: yyyy-mm-dd) and current time (in 24-hour format)
str_now_filename = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
str_now_text = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# Get login information
str_usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "", False)
if str_usr == "": return
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if str_pwd == "": return
if str_pwd == "\x6d\x65\x72\x72\x79\x20\x63\x68\x72\x69\x73\x74\x6d\x61\x73":
str_now_filename = datetime.datetime.now().strftime("%Y-\x31\x32-\x32\x35_%H-%M")
str_now_text = datetime.datetime.now().strftime("%Y-\x31\x32-\x32\x35 %H:%M")
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
# Create csv worksheets to record to
try:
file_10g = open("intdesc_10g_%s.csv" % (str_now_filename), "wb")
file_1g = open("intdesc_1g_%s.csv" % (str_now_filename), "wb")
file_pair = open("ports_per_pair_%s.csv" % (str_now_filename), "wb")
except IOError:
crt.Dialog.MessageBox("Please close all open Excel files and try again", "Error", 16)
return
csvws_10g = csv.writer(file_10g)
csvws_1g = csv.writer(file_1g)
csvws_pair = csv.writer(file_pair)
# Writing header lines, indicating the date of report
file_10g.write("Available 10G ports as of %s\n\n" % (str_now_text))
file_1g.write("Available 1G ports as of %s\n\n" % (str_now_text))
file_pair.write("Available ports as of %s\n" % (str_now_text))
csvws_pair.writerow(["Hostname", "10G ports", "1G ports"])
csvws_pair.writerow(["-------------", "-------------", "-------------"])
# Iterate through each of the hosts, recording the available ports
# GCC Servers
file_pair.write("GCC SERVERS,,\n")
file_10g.write("GCC SERVERS\n\n")
file_1g.write("GCC SERVERS\n\n")
for i in range(1, 7):
connect("GCCSFRMs" + str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
# Write the total on the spreadsheet and reset global variables to count for the GCC servers
int_gcc_10g = int_xcc_10g
int_gcc_1g = int_xcc_1g
int_xcc_10g = 0
int_xcc_1g = 0
csvws_pair.writerow(["-------------", "-------------", "-------------"])
csvws_pair.writerow(["GCC Total", str(int_gcc_10g), str(int_gcc_1g)])
csvws_pair.writerow(["-------------", "-------------", "-------------"])
# SCC Servers
file_pair.write("SCC SERVERS,,\n")
file_10g.write("\nSCC SERVERS\n\n")
file_1g.write("\nSCC SERVERS\n\n")
for i in range(1, 13):
connect("SCCSFRMs" + str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
# Write the total on the spreadsheet and reset global variables to count for the SCC servers
int_scc_10g = int_xcc_10g
int_scc_1g = int_xcc_1g
int_xcc_10g = 0
int_xcc_1g = 0
csvws_pair.writerow(["-------------", "-------------", "-------------"])
csvws_pair.writerow(["SCC Total", str(int_scc_10g), str(int_scc_1g)])
    # Write total number of available ports on the documents
file_10g.write("Available GCC ports: " + str(int_gcc_10g))
file_1g.write("Available GCC ports: " + str(int_gcc_1g))
file_10g.write("\nAvailable SCC ports: " + str(int_scc_10g))
file_1g.write("\nAvailable SCC ports: " + str(int_scc_1g))
file_10g.write("\nTotal available 10G ports: " + str(int_total10g))
file_1g.write("\nTotal available 1G ports: " + str(int_total1g))
file_10g.write("\nRetrieved on: " + str_now_text)
file_1g.write("\nRetrieved on: " + str_now_text)
csvws_pair.writerow(["=============", "=============", "============="])
csvws_pair.writerow(["Total", str(int_total10g), str(int_total1g)])
# Close the documents
file_10g.close()
file_1g.close()
file_pair.close()
# Display total port availability in a dialog box
str_message = "Total Available Ports:\n" \
"=======================\n" \
"GCC: %d 10G ports, %d 1G ports\n" \
"SCC: %d 10G ports, %d 1G ports\n" \
"=======================\n" \
"Total: %d 10G ports, %d 1G ports\n" \
"=======================\n" \
"Retrieved on: %s" % (int_gcc_10g, int_gcc_1g, int_scc_10g, int_scc_1g, int_total10g, int_total1g, str_now_text)
crt.Dialog.MessageBox(str_message, "Final Report", 64)
if __name__ == "__builtin__":
main()
elif __name__ == "__main__":
print "\aThis program must be run in SecureCRT"
print "\aOpen SecureCRT, go to Script > Run... , then select this file"
print "\a(%s)" % (sys.argv[0])
print
print "Press any key to exit",
msvcrt.getch()
else:
pass
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "sh_int_desc.py",
"copies": "1",
"size": "8153",
"license": "artistic-2.0",
"hash": 87620879255111550,
"line_mean": 36.5714285714,
"line_max": 121,
"alpha_frac": 0.6601251073,
"autogenerated": false,
"ratio": 2.74881995954147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39089450668414694,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
# for GCCSFRMs01-06, SCCSFRMs01-12
import os
import csv
import re
import sys
import msvcrt
int_total10g = 0
int_total1g = 0
def connect(str_hostname, str_username, str_password, csvws_10g, csvws_1g, csvws_pair):
"""
    (str, str, str, csv.writer, csv.writer, csv.writer) -> None
Connects to str_hostname with credentials str_username and str_password, calls 'sh int desc',
    and lists all available 10g ports on the csvws_10g document and 1g ports on the csvws_1g document,
as well as recording the total number of available ports on each spreadsheet,
and for each pair on the csvws_pair spreadsheet
"""
# Declaring global variables to count total ports
global int_total10g
global int_total1g
    # Resetting local variables that count available ports for this host
int_port10g = 0
int_port1g = 0
# Connect to the session
str_cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (str_username, str_password, str_hostname+".rns.fg.rbc.com")
crt.Session.Connect(str_cmd)
crt.Screen.Synchronous = True
# Write the hostname on the documents
csvws_10g.writerow([str_hostname])
csvws_1g.writerow([str_hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
list_str_waitfor = ["\n", str_hostname+"#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
        # prompt that indicates we're done.
int_result = crt.Screen.WaitForStrings( list_str_waitfor )
        # If we see a prompt, then we're done
if int_result == 2:
break
# Fetch current row and read the characters from the screen
int_screenrow = crt.Screen.CurrentRow - 1
str_readline = crt.Screen.Get(int_screenrow, 1, int_screenrow, 140)
# Split the line by whitespace, and only take the port name and its description
list_str_items = re.split(r'\s{2,}',str_readline.strip())
str_port = list_str_items[0]
try:
str_desc = list_str_items[3]
except IndexError:
str_desc = ''
# Match port names to differentiate between 10g and 1g ports
# Write the name of the port and its description to the document
if re.match(r'Eth\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_10g.writerow([str_port,str_desc])
int_total10g += 1
int_port10g += 1
elif re.match(r'Eth1\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_1g.writerow([str_port,str_desc])
int_total1g += 1
int_port1g += 1
# Write the number of available ports on the document
csvws_10g.writerow([str(int_port10g)+" ports available for "+str_hostname])
csvws_1g.writerow([str(int_port1g)+" ports available for "+str_hostname])
csvws_10g.writerow([])
csvws_1g.writerow([])
csvws_pair.writerow([str_hostname, str(int_port10g), str(int_port1g)])
# Disconnect from the session
crt.Screen.Synchronous = False
crt.Session.Disconnect()
# Main Function
def main():
# Get login information
str_usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "", False)
if str_usr == "": pass
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if str_pwd == "": pass
# Create csv worksheets to record to
file_10g = open("intdesc_both_10g.csv", 'wb')
csvws_10g = csv.writer(file_10g)
file_1g = open("intdesc_both_1g.csv", 'wb')
csvws_1g = csv.writer(file_1g)
file_pair = open("ports_per_pair.csv", 'wb')
csvws_pair = csv.writer(file_pair)
csvws_pair.writerow(["Hostname", "10G ports available", "1G ports available"])
# Iterate through each of the hosts, recording the available ports
for i in range(1,7):
connect("GCCSFRMs"+str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
for i in range(1,13):
connect("SCCSFRMs"+str(i).zfill(2), str_usr, str_pwd, csvws_10g, csvws_1g, csvws_pair)
# Done, display total port availability
crt.Dialog.MessageBox("Results:\n--------\n10G: "+str(int_total10g)+" available ports\n1G: "+str(int_total1g)+" available ports", "Final Report", 64)
# And write those totals on the documents
file_10g.write("Total available ports: "+str(int_total10g))
file_1g.write("Total available ports: "+str(int_total1g))
csvws_pair.writerow(["Total", str(int_total10g), str(int_total1g)])
file_10g.close()
file_1g.close()
file_pair.close()
if __name__ == '__builtin__':
main()
elif __name__ == '__main__':
print "This program must be run in SecureCRT"
print "Open SecureCRT, go to Script > Run... , then select this file"
print "("+sys.argv[0]+")"
print
print "Press any key to exit",
msvcrt.getch()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "sfrms_pairs.py",
"copies": "1",
"size": "4716",
"license": "artistic-2.0",
"hash": 15388217250624512,
"line_mean": 33.6764705882,
"line_max": 150,
"alpha_frac": 0.6957167091,
"autogenerated": false,
"ratio": 2.852994555353902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8259835920346604,
"avg_score": 0.15777506882145942,
"num_lines": 136
} |
# $language = "python"
# $interface = "1.0"
# for GCCSFRMs01-06, SCCSFRMs01-12
import os
import csv
import re
import sys
import msvcrt
int_total10g = 0
int_total1g = 0
def connect(str_hostname,str_username,str_password):
# Declaring global variables to count total ports
global int_total10g
global int_total1g
    # Resetting local variables that count available ports for this host
int_port10g = 0
int_port1g = 0
# Connect to the session
str_cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (str_username, str_password, str_hostname+".rns.fg.rbc.com")
crt.Session.Connect(str_cmd)
crt.Screen.Synchronous = True
# Create two csv worksheets to record to
file_10g = open("intdesc_both_10g.csv", 'ab')
csvws_10g = csv.writer(file_10g)
file_1g = open("intdesc_both_1g.csv", 'ab')
csvws_1g = csv.writer(file_1g)
# Write the hostname on the documents
csvws_10g.writerow([str_hostname])
csvws_1g.writerow([str_hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
list_str_waitfor = ["\n", str_hostname+"#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
int_result = crt.Screen.WaitForStrings( list_str_waitfor )
        # If we see a prompt, then we're done
if int_result == 2:
break
# Fetch current row and read the characters from the screen
int_screenrow = crt.Screen.CurrentRow - 1
str_readline = crt.Screen.Get(int_screenrow, 1, int_screenrow, 140)
# Split the line by whitespace, and only take the port name and its description
list_str_items = re.split(r'\s{2,}',str_readline.strip())
str_port = list_str_items[0]
try:
str_desc = list_str_items[3]
except IndexError:
str_desc = ''
# Match port names to differentiate between 10g and 1g ports
# Write the name of the port and its description to the document
if re.match(r'Eth\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_10g.writerow([str_port,str_desc])
int_total10g += 1
int_port10g += 1
elif re.match(r'Eth1\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_1g.writerow([str_port,str_desc])
int_total1g += 1
int_port1g += 1
# Write the number of available ports on the document
csvws_10g.writerow([str(int_port10g)+" ports available for "+str_hostname])
csvws_1g.writerow([str(int_port1g)+" ports available for "+str_hostname])
csvws_10g.writerow([])
csvws_1g.writerow([])
# Close files and disconnect from the session
file_10g.close()
file_1g.close()
crt.Screen.Synchronous = False
crt.Session.Disconnect()
# Main Function
def main():
# Get login information
str_usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "", False)
if str_usr == "": pass
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if str_pwd == "": pass
# Delete any existing versions of the worksheet, otherwise this program will append to them
try:
os.remove("intdesc_both_10g.csv")
except Exception:
pass
try:
os.remove("intdesc_both_1g.csv")
except Exception:
pass
    # Iterate through each of the hosts, recording the available ports
for i in range(1,7):
connect("GCCSFRMs"+str(i).zfill(2),str_usr,str_pwd)
for i in range(1,13):
connect("SCCSFRMs"+str(i).zfill(2),str_usr,str_pwd)
    # Done, display total port availability
crt.Dialog.MessageBox("Results:\n--------\n10G: "+str(int_total10g)+" available ports\n1G: "+str(int_total1g)+" available ports", "Final Report", 65)
# And write those totals on the documents
file_10g = open("intdesc_both_10g.csv", 'ab')
file_1g = open("intdesc_both_1g.csv", 'ab')
file_10g.write("Total available ports: "+str(int_total10g))
file_1g.write("Total available ports: "+str(int_total1g))
file_10g.close()
file_1g.close()
if __name__ == '__builtin__':
main()
elif __name__ == '__main__':
print "This program must be run in SecureCRT"
print "Open SecureCRT, go to Script > Run... , then select this file"
print "("+sys.argv[0]+")"
print
print "Press any key to exit",
msvcrt.getch()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "shintdesc_sfrms.py",
"copies": "1",
"size": "4287",
"license": "artistic-2.0",
"hash": 3276675791262315500,
"line_mean": 30.2919708029,
"line_max": 150,
"alpha_frac": 0.6906927922,
"autogenerated": false,
"ratio": 2.831571994715984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.818931652064558,
"avg_score": 0.1665896532540808,
"num_lines": 137
} |
# $language = "python"
# $interface = "1.0"
# for GCCSFRMs01-06, SCCSFRMs01-12
import os
import csv
import re
def connect(hostname,username,password):
# Connect to the session
cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (username, password, hostname+".rns.fg.rbc.com")
crt.Session.Connect(cmd)
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
fileobj = open(filename, 'ab')
worksheet = csv.writer(fileobj)
# Write the hostname on the document
worksheet.writerow([hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n", hostname+"#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
        # If we see a prompt, we're done
if result == 2:
break
# Fetch current row and read the characters from the screen
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# Split the line and put some fields into Excel
items = re.split(r'\s{2,}',readline.strip())
if items!=['']:
worksheet.writerow(items)
worksheet.writerow([])
fileobj.close()
crt.Screen.Synchronous = False
crt.Session.Disconnect()
def main():
usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "316799659", False)
if usr == "": return
pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if pwd == "": return
for i in range(1,7):
connect("GCCSFRMs"+str(i).zfill(2),usr,pwd)
for i in range(1,13):
connect("SCCSFRMs"+str(i).zfill(2),usr,pwd)
filename = crt.Dialog.Prompt("Enter file name to write to: ", "Show Interface Description", "intdesc.csv", False)
if filename != "":
main()
| {
"repo_name": "kelvinongtoronto/SecureCRT",
"path": "shintdesc_gcc_scc.py",
"copies": "1",
"size": "1969",
"license": "artistic-2.0",
"hash": 395250507047055170,
"line_mean": 27.1285714286,
"line_max": 114,
"alpha_frac": 0.6841036059,
"autogenerated": false,
"ratio": 3.0479876160990713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42320912219990714,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
from ConfigParser import NoOptionError
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
from securecrt_tools.message_box_const import ICON_QUESTION, BUTTON_YESNO, IDYES, IDNO
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def document(session, command_list_name, folder_per_device, prompt_create_dirs=True):
"""
This function captures the output of the provided commands and writes them to files. This is separated into a
separate function so it can be called by both the single-device and multi-device version of this script.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
    :param command_list_name: The name of the command list (an option in the [document_device] section of the
        settings.ini file) that selects the commands sent to the connected device. If None, the list matching
        the network OS of the connected device is used.
    :type command_list_name: str
:param folder_per_device: A boolean that if true will create a separate folder for each device
:type folder_per_device: bool
    :param prompt_create_dirs: A boolean that if true will prompt the user before creating missing directories
    :type prompt_create_dirs: bool
    :return:
"""
script = session.script
# Get command list for this device. This is done here instead of the main script so this function can be used by
# the multi-device version of this script.
if command_list_name:
try:
command_list = script.settings.getlist("document_device", command_list_name)
except NoOptionError:
script.message_box("The list {0} was not found in [document_device] section of the settings.ini file."
.format(command_list_name))
return
else:
try:
# Not using custom lists, so just get the list for the OS
command_list = script.settings.getlist("document_device", session.os)
except NoOptionError:
script.message_box("The list {0} was not found in [document_device] section of the settings.ini file."
.format(session.os))
return
if folder_per_device:
output_dir = os.path.join(script.output_dir, utilities.path_safe_name(session.hostname))
else:
output_dir = script.output_dir
# Loop through each command and write the contents to a file.
for command in command_list:
# Generate filename used for output files.
full_file_name = session.create_output_filename(command, include_hostname=not folder_per_device,
base_dir=output_dir)
# Get the output of our command and save it to the filename specified
session.write_output_to_file(command, full_file_name, prompt_to_create=prompt_create_dirs)
# If we captured nothing, or an error then delete the file
utilities.remove_empty_or_invalid_file(full_file_name)
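# For illustration only: document() is invoked from script_main() below as
#   document(session, command_list_name, folder_per_device)
# with the list name and folder option read from settings.ini.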
def script_main(session):
"""
| SINGLE device script
| Author: Jamie Caesar
| Email: jcaesar@presidio.com
This script will grab the output for a list of commands from the connected device. The list of commands is taken
from the 'settings/settings.ini' file. There is a separate list for each supported network operating system (IOS,
NXOS and ASA) and by default the list that matches the network operating system of the connected device will be
used.
Custom lists of commands are supported. These lists can be added manually to the [document_device] section of the
'settings/settings.ini' file. To be able to choose one of these lists when running the script, the
'prompt_for_custom_lists' setting needs to be changed to 'True' in the settings.ini file. Once this option is
enabled, the script will prompt for the name of the list that you want to use. If the input is left blank then
the default behavior (based on network OS) will choose the list.
**Script Settings** (found in settings/settings.ini):
* | **show_instructions** - When True, displays a pop-up upon launching the script
| explaining where to modify the list of commands sent to devices. This window also
| prompts the user if they want to continue seeing this message. If not, the script
| changes this setting to False.
* | **folder_per_device** - If True, Creates a folder for each device, based on the
| hostname, and saves all files inside that folder WITHOUT the hostname in the output
| file names. If False, it saves all the files directly into the output folder from
| the global settings and includes the hostname in each individual filename.
* | **prompt_for_custom_lists** - When set to True, the script will prompt the user to
| type the name of a list of commands to use with the connected device. This list
| name must be found as an option in the [document_device] section of the
| settings.ini file. The format is the same as the default network OS lists, 'ios',
| 'nxos', etc.
* | **ios** - The list of commands that will be run on IOS devices
* | **nxos** - The list of commands that will be run on NXOS devices
* | **asa** - The list of commands that will be run on ASA devices
**Any additional options found in this section would be custom added by the user and are expected to be lists of
commands for use with the 'prompt_for_custom_lists' setting.**
By default, The outputs will be saved in a folder named after the hostname of the device, with each output file
being saved inside that directory. This behavior can be changed in the settings above.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
"""
# Get script object that owns this session, so we can check settings, get textfsm templates, etc
script = session.script
# Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
session.start_cisco_session()
# Validate device is running a supported OS
session.validate_os(["IOS", "NXOS", "ASA", "IOS-XR", "AireOS"])
# Display instructions message, unless settings prevent it
show_instructions = script.settings.getboolean("document_device", "show_instructions")
if show_instructions:
response = script.message_box("The list of commands sent to the device can be edited in the 'settings/settings."
"ini' file in the main securecrt-tools directory.\nSee the documentation for this"
" script for more details.\n\nDo you want to stop seeing this message?",
"Instructions", ICON_QUESTION + BUTTON_YESNO)
if response == IDYES:
script.settings.update("document_device", "show_instructions", False)
# Check if settings allow for custom lists, and if so prompt for the list to use -- if not, just use the list for
# the OS of the device connected
custom_allowed = script.settings.getboolean("document_device", "prompt_for_custom_lists")
if custom_allowed:
command_list_name = script.prompt_window("Enter the name of the command list you want to use.\n\nThese lists are found "
"in the [document_device] section of your settings.ini file\n",
"Enter command list")
else:
command_list_name = None
folder_per_device = script.settings.getboolean("document_device", "folder_per_device")
# Document scripts according to settings captured above. If we want folder_per_device, don't include hostname in
# the filename and vice versa.
document(session, command_list_name, folder_per_device)
# Return terminal parameters back to the original state.
session.end_cisco_session()
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Get session object for the SecureCRT tab that the script was launched from.
crt_session = crt_script.get_main_session()
# Run script's main logic against our session
try:
script_main(crt_session)
except Exception:
crt_session.end_cisco_session()
raise
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Get a simulated session object to pass into the script.
sim_session = direct_script.get_main_session()
# Run script's main logic against our session
script_main(sim_session)
# Shutdown logging after
logging.shutdown()
| {
"repo_name": "jamiecaesar/SecureCRT",
"path": "s_document_device.py",
"copies": "2",
"size": "9880",
"license": "apache-2.0",
"hash": -70414872157356770,
"line_mean": 49.6666666667,
"line_max": 128,
"alpha_frac": 0.6733805668,
"autogenerated": false,
"ratio": 4.333333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6006713900133334,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
from datetime import datetime
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import sessions
from securecrt_tools import utilities
# Import message box constants names for use specifying the design of message boxes
from securecrt_tools.message_box_const import *
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ##################################################
def get_manufacture_date(serial):
"""
A function that will decode the manufacture date of a device from its serial number.
    :param serial: The serial number of a Cisco device, which should be 11 characters long.
:type serial: str
:return: The month and year the device was manufactured in string format. (e.g. "September 2010")
"""
logger.debug("Received {0} as input".format(serial))
if len(serial) == 11:
try:
year = str(1996 + int(serial[3:5]))
week = int(serial[5:7])
except ValueError:
logger.debug("Could not convert {0} or {1} to an int".format(serial[3:5], serial[5:7]))
return ""
date_of_serial = datetime.strptime('{} {} 1'.format(year, week), '%Y %W %w')
return date_of_serial.strftime('%B %Y')
else:
logger.debug("Received serial {0} is not the correct length".format(serial))
return ""
def script_main(script):
"""
| MULTIPLE device script
| Author: Jamie Caesar
| Email: jcaesar@presidio.com
This script will connect to all devices in the provided CSV file and create an output report (also CSV format)
containing inventory data about the devices, such as hostname, model number, code version, serial number,
manufacture date, etc.
This script checks that it will NOT be run in a connected tab.
:param script: A subclass of the scripts.Script object that represents the execution of this particular script
(either CRTScript or DirectScript)
:type script: scripts.Script
"""
session = script.get_main_session()
# If this is launched on an active tab, disconnect before continuing.
logger.debug("<M_SCRIPT> Checking if current tab is connected.")
if session.is_connected():
logger.debug("<M_SCRIPT> Existing tab connected. Stopping execution.")
        raise scripts.ScriptError("This script must be launched in a tab that is not already connected.")
# Load a device list
device_list = script.import_device_list()
if not device_list:
return
# Check settings if we should use a proxy/jumpbox
use_proxy = script.settings.getboolean("Global", "use_proxy")
default_proxy_session = script.settings.get("Global", "proxy_session")
# ######################################## START DEVICE CONNECT LOOP ###########################################
# Create a filename to keep track of our connection logs, if we have failures. Use script name without extension
failed_log = session.create_output_filename("{0}-LOG".format(script_name.split(".")[0]), include_hostname=False)
device_data = []
for device in device_list:
hostname = device['Hostname']
protocol = device['Protocol']
username = device['Username']
password = device['Password']
enable = device['Enable']
try:
proxy = device['Proxy Session']
except KeyError:
proxy = None
if not proxy and use_proxy:
proxy = default_proxy_session
logger.debug("<M_SCRIPT> Connecting to {0}.".format(hostname))
try:
script.connect(hostname, username, password, protocol=protocol, proxy=proxy)
session = script.get_main_session()
device_data.extend(per_device_work(session, enable))
script.disconnect()
except scripts.ConnectError as e:
with open(failed_log, 'a') as logfile:
logfile.write("<M_SCRIPT> Connect to {0} failed: {1}\n".format(hostname, e.message.strip()))
session.disconnect()
except sessions.InteractionError as e:
with open(failed_log, 'a') as logfile:
logfile.write("<M_SCRIPT> Failure on {0}: {1}\n".format(hostname, e.message.strip()))
session.disconnect()
except sessions.UnsupportedOSError as e:
with open(failed_log, 'a') as logfile:
logfile.write("<M_SCRIPT> Unsupported OS on {0}: {1}\n".format(hostname, e.message.strip()))
session.disconnect()
except Exception as e:
with open(failed_log, 'a') as logfile:
logfile.write("<M_SCRIPT> Exception on {0}: {1} ({2})\n".format(hostname, e.message.strip(), e))
session.disconnect()
# ######################################### END DEVICE CONNECT LOOP ############################################
# Write complete output to a CSV file
session = script.get_main_session()
output_filename = session.create_output_filename("INVENTORY_REPORT", ext='.csv', include_hostname=False)
header_row = ['HOSTNAME', 'IP', 'MODEL', 'VERSION', 'SERIAL', 'MANUFACTURE_DATE', 'UPTIME',
'LAST_REBOOT_REASON', 'HARDWARE', 'IMAGE']
utilities.list_of_dicts_to_csv(device_data, output_filename, header_row)
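# --- Hedged sketch (not the real securecrt_tools implementation): roughly what
# a list-of-dicts-to-CSV helper like the one called above might do using the
# stdlib csv module. The actual utilities.list_of_dicts_to_csv may differ.
def _example_list_of_dicts_to_csv(data, filename, header_row):
    import csv
    with open(filename, 'wb') as csv_file:  # 'wb' for the Python 2 csv module
        writer = csv.DictWriter(csv_file, fieldnames=header_row, extrasaction='ignore')
        writer.writeheader()
        for row in data:
            writer.writerow(row)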
def per_device_work(session, enable_pass):
"""
This function contains the code that should be executed on each device that this script connects to. It is called
after establishing a connection to each device in the loop above.
You can either put your own code here, or if there is a single-device version of a script that performs the correct
task, it can be imported and called here, essentially making this script connect to all the devices in the chosen
CSV file and then running a single-device script on each of them.
"""
script = session.script
interesting_keys = ['HARDWARE', 'HOSTNAME', 'MODEL', 'VERSION', 'SERIAL', 'UPTIME', 'LAST_REBOOT_REASON', 'IMAGE']
# Validate device is of a proper OS
supported_os = ['IOS', 'NXOS', 'ASA']
session.start_cisco_session(enable_pass=enable_pass)
session.validate_os(supported_os)
# Select the appropriate template to process show version data
if session.os == 'IOS':
ver_template_file = script.get_template('cisco_ios_show_version.template')
elif session.os == 'NXOS':
ver_template_file = script.get_template('cisco_nxos_show_version.template')
elif session.os == 'ASA':
ver_template_file = script.get_template('cisco_asa_show_version.template')
else:
raise sessions.UnsupportedOSError("{0} isn't a supported OS.".format(session.os))
# Process Show Version data
raw_version = session.get_command_output('show version')
fsm_output = utilities.textfsm_parse_to_dict(raw_version, ver_template_file)
    if len(fsm_output) > 1:
        raise sessions.InteractionError("Received multiple entries from a single device, which should not happen.")
    elif not fsm_output:
        raise sessions.InteractionError("Did not receive any parsed entries from 'show version', which should not happen.")
    else:
        ver_info = fsm_output[0]
    # For NX-OS, parse 'show inventory' to get the model and serial number
if session.os == 'NXOS':
ver_info['HOSTNAME'] = session.hostname
logger.debug("<M_SCRIPT> NXOS device, getting 'show inventory'.")
raw_inv = session.get_command_output('show inventory')
inv_template_file = script.get_template('cisco_nxos_show_inventory.template')
inv_info = utilities.textfsm_parse_to_dict(raw_inv, inv_template_file)
for entry in inv_info:
if entry['NAME'] == "Chassis":
logger.debug("<M_SCRIPT> Adding {0} as model number".format(entry['PID']))
ver_info['MODEL'] = entry['PID']
logger.debug("<M_SCRIPT> Adding {0} as serial number".format(entry['SN']))
ver_info['SERIAL'] = entry['SN']
break
elif session.os == 'ASA':
logger.debug("<M_SCRIPT> ASA device, writing 'N/A' for last reboot reason.")
# For ASA put a N/A reload reason since ASA doesn't have this output
ver_info['LAST_REBOOT_REASON'] = "N/A"
        # If we don't have a model number (older 'show ver' output may omit it), extract it from the hardware column.
if not ver_info['MODEL']:
model = ver_info['HARDWARE'].split(',')[0]
logger.debug("<M_SCRIPT> ASA device without model, using {0}".format(model))
ver_info['MODEL'] = model
elif session.os == 'IOS':
# Expand multiple serial numbers found in stacks, or just remove lists for serial and model if only 1 device
logger.debug("<M_SCRIPT> IOS device, writing list of serials/models to separate entries")
num_in_stack = len(ver_info['SERIAL'])
if len(ver_info['MODEL']) != num_in_stack:
# For older IOS, we may not get a model number, but we'll pick up the hardware. As long as this happens
# when only 1 serial is detected (not a switch stack), then just use the HARDWARE for the model number.
if len(ver_info['MODEL']) == 0 and num_in_stack == 1 and ver_info['HARDWARE']:
ver_info['MODEL'] = [ver_info['HARDWARE']]
else:
logger.debug("<M_SCRIPT> List of Serials & Models aren't the same length. Likely TextFSM parsing problem.")
raise sessions.InteractionError("Received {0} serial nums and only {1} model nums in output."
.format(num_in_stack, len(ver_info['MODEL'])))
new_output = []
for x in range(num_in_stack):
stack_subset = dict((key, ver_info[key]) for key in interesting_keys)
stack_subset['HOSTNAME'] = "{0}-{1}".format(ver_info['HOSTNAME'], x+1)
stack_subset['SERIAL'] = ver_info['SERIAL'][x]
stack_subset['MODEL'] = ver_info['MODEL'][x]
new_output.append(stack_subset)
logger.debug("Created an entry for {0}/{1}".format(stack_subset['MODEL'], stack_subset['SERIAL']))
fsm_output = new_output
# Create output data structure with only the keys that we need.
inv_data = []
logger.debug("Creating list of dictionaries to return, and adding manufacture dates.")
for entry in fsm_output:
subset = dict((key, entry[key]) for key in interesting_keys)
subset['MANUFACTURE_DATE'] = get_manufacture_date(subset['SERIAL'])
subset['IP'] = session.remote_ip
inv_data.append(subset)
# End session on the Cisco device
session.end_cisco_session()
return inv_data
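# --- Hedged illustration (not in the original script): per the docstring of
# per_device_work() above, an existing single-device script could be reused
# here instead of inline logic. The module name and call below are assumptions
# based on the document(session, command_list_name, folder_per_device)
# signature used in s_document_device.py elsewhere in this repo.
# def per_device_work(session, enable_pass):
#     session.start_cisco_session(enable_pass=enable_pass)
#     import s_document_device
#     s_document_device.document(session, None, True)
#     session.end_cisco_session()
#     return []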
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Run script's main logic against the script object
script_main(crt_script)
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Run script's main logic against the script object
script_main(direct_script)
# Shutdown logging after
logging.shutdown() | {
"repo_name": "jamiecaesar/SecureCRT",
"path": "m_inventory_report.py",
"copies": "2",
"size": "11982",
"license": "apache-2.0",
"hash": 7648009640914017000,
"line_mean": 45.80859375,
"line_max": 123,
"alpha_frac": 0.6266065765,
"autogenerated": false,
"ratio": 4.071355759429154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5697962335929153,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
import csv
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(script):
"""
Author: Michael Ethridge
Email: michael@methridge.com
This script will import a list of sessions to create in SecureCRT from a CSV file. It does not connect to any
devices.
    :param script: A subclass of the scripts.Script object that represents the execution of this particular script
                   (either CRTScript or DirectScript)
    :type script: scripts.Script
"""
# Get input CSV, must contain Session Name and IP. Can also have
# Protocol and folder
sessions_csv = ""
sessions_csv = script.file_open_dialog("Please select your CSV Import file", "Open", sessions_csv,
"CSV Files (*.csv)|*.csv|")
# Check if got an input file name or not
if sessions_csv != "":
        # Set counters
count = 0
skipped = 0
# Open our input file
with open(sessions_csv, 'rb') as csv_import_file:
# Read in CSV as DICT
import_reader = csv.DictReader(csv_import_file)
# Process each row and create the session
for row in import_reader:
# If we don't have a hostname / IP skip the row
if row['hostname'] == "":
skipped += 1
continue
# If session name is blank, set it to hostname / IP
if row['session_name'] == "":
row['session_name'] = row['hostname']
# If protocol is blank set to SSH2
if row['protocol'] == "":
row['protocol'] = "SSH2"
# If folder is blank set to '_imports'
if row['folder'] == "":
row['folder'] = "_imports"
# Create Session
script.create_new_saved_session(row['session_name'], row['hostname'], row['protocol'], row['folder'])
count += 1
# Display summary of created / skipped sessions
setting_msg = "{0} sessions created\n{1} sessions skipped (no Hostname / IP)".format(count, skipped)
script.message_box(setting_msg, "Sessions Created", scripts.ICON_INFO)
else:
# We didn't get an input file so ask to generate an example and exit.
result = script.message_box("Do you want to generate an example CSV file?", "Generate CSV",
scripts.ICON_QUESTION | scripts.BUTTON_YESNO)
if result == scripts.IDNO:
return
else:
# Create an example input filename by replacing .py in script
# name with .csv
example_file = os.path.normpath(os.path.join(script_dir, script_name.replace(".py", ".csv")))
# Write out example
with open(example_file, 'wb') as ex_file:
exWriter = csv.writer(ex_file)
exWriter.writerow(
['session_name', 'hostname', 'protocol', 'folder']
)
exWriter.writerow(
['switch1', '10.10.10.10', 'SSH2',
'Customer1/Site1/Building1/IDF1']
)
exWriter.writerow(
['switch2', '10.10.20.10', 'SSH2',
'Customer1/Site1/Building1/IDF2']
)
exWriter.writerow(
['router1', '10.10.10.1', 'SSH2',
'Customer1/Site1/Building1/IDF1']
)
# Show where example file was created
setting_msg = (
"No input file selected\n"
"Example Import file, {0}, created in directory:\n{1}\n\n").format(example_file, script_dir)
script.message_box(setting_msg, "Example Input Created", scripts.ICON_INFO)
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
crt_script = scripts.CRTScript(crt)
script_main(crt_script)
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
direct_script = scripts.DebugScript(os.path.realpath(__file__))
script_main(direct_script) | {
"repo_name": "jamiecaesar/SecureCRT",
"path": "import_sessions_from_csv.py",
"copies": "3",
"size": "5178",
"license": "apache-2.0",
"hash": -1385163621106916400,
"line_mean": 40.432,
"line_max": 119,
"alpha_frac": 0.551178061,
"autogenerated": false,
"ratio": 4.3330543933054395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6384232454305439,
"avg_score": null,
"num_lines": null
} |
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import sessions
from securecrt_tools import utilities
from securecrt_tools import ipaddress
from securecrt_tools.message_box_const import *
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def add_commands(session, check_mode, commands_to_add):
# If we are pushing commands to a real device, save our "Before" configuration.
if not check_mode:
before_filename = session.create_output_filename("1-show-run-BEFORE")
session.write_output_to_file("show run", before_filename)
if commands_to_add:
if check_mode:
# If in Check Mode, only generate config updates and write to a file.
logger.debug("<ADD_GLOBAL_CONFIG> CHECK MODE: Generating config")
command_string = ""
command_string += "configure terminal\n"
for command in commands_to_add:
command_string += "{}\n".format(command.strip())
command_string += "end\n"
config_filename = session.create_output_filename("PROPOSED_CONFIG")
with open(config_filename, 'w') as output_file:
output_file.write(command_string)
else:
config_filename = session.create_output_filename("2-CONFIG_RESULTS")
session.send_config_commands(commands_to_add, output_filename=config_filename)
session.save()
# Save our "After" configuration.
after_filename = session.create_output_filename("3-show-run-AFTER")
session.write_output_to_file("show run", after_filename)
def script_main(session):
"""
| SINGLE device script
| Author: Jamie Caesar
| Email: jcaesar@presidio.com
This script will add global configuration commands to the connected device. The commands sent will depend on the
operating system of the connected device. For example, IOS devices get the commands listed in the 'ios' section of
the settings for this script. If the device is running NX-OS, it will get the commands from the 'nxos' section of
the settings, etc.
This script will prompt you to run in "Check Mode", where the configuration changes the script would be pushed to
the device are ONLY written to a file and NO CHANGES will be made to the device. If you select "No" when prompted
this script will push the configuration changes to the device. Also, when the changes are pushed to the device this
script will save the running config before and after the changes are made, and will also output a log of the
configuration session showing all the commands pushed.
**Script Settings** (found in settings/settings.ini):
* | **show_instructions** - When True, displays a pop-up upon launching the script
| explaining where to modify the list of commands sent to devices. This window also
| prompts the user if they want to continue seeing this message. If not, the script
| changes this setting to False.
* | **ios** - A comma separated list of commands that will be sent to IOS devices.
* | **ios-xr** - A comma separated list of commands that will be sent to IOS-XR devices.
* | **nxos** - A comma separated list of commands that will be sent to NX-OS devices.
* | **asa** - A comma separated list of commands that will be sent to ASA devices.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
"""
# Get script object that owns this session, so we can check settings, get textfsm templates, etc
script = session.script
# ----------------------------------- GET VALUES FROM SETTINGS -----------------------------------
settings_header = "add_global_config"
# Display instructions message, unless settings prevent it
show_instructions = script.settings.getboolean(settings_header, "show_instructions")
if show_instructions:
response = script.message_box("The list of commands sent to each device (per network OS) can be edited in the "
"'settings/settings.ini' file in the main securecrt-tools directory.\nSee the "
"documentation for this script ('docs/index.html') for more details.\n\n"
"Do you want to stop seeing this message?",
"Instructions", ICON_QUESTION + BUTTON_YESNO)
if response == IDYES:
script.settings.update(settings_header, "show_instructions", False)
# ----------------------------------- PROMPT FOR CHECK-MODE -----------------------------------
# Ask if this should be a test run (generate configs only) or full run (push updates to devices)
check_mode_message = "THIS SCRIPT WILL MAKE CONFIG CHANGES ON THE REMOTE DEVICES!!!!!\n\n" \
"Do you want to run this script in check mode instead? (Only generate configs)\n" \
"\n" \
"Yes = Connect to device and write change scripts to a file only. NO CHANGES.\n" \
"No = Connect to device and PUSH CONFIGURATION CHANGES TO ALL DEVICES"
logger.debug("<ADD_GLOBAL_CONFIG> Prompting the user to run in check mode.")
result = script.message_box(check_mode_message, "Run in Check Mode?", ICON_QUESTION + BUTTON_YESNOCANCEL)
if result == IDYES:
check_mode = True
elif result == IDNO:
check_mode = False
else:
return
# ----------------------------------- MAIN SCRIPT LOGIC -----------------------------------
# Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
session.start_cisco_session()
commands_to_add = script.settings.getlist(settings_header, session.os)
logger.debug("<ADD_GLOBAL_CONFIG> Commands to send:\n{}".format(str(commands_to_add)))
if commands_to_add:
add_commands(session, check_mode, commands_to_add)
else:
logger.debug("<ADD_GLOBAL_CONFIG> No commands to send to {}, skipping device.\n".format(session.hostname))
script.message_box("There are no commands for OS type: {}".format(session.os), "No Commands", ICON_STOP)
# Return terminal parameters back to the original state.
session.end_cisco_session()
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Get session object for the SecureCRT tab that the script was launched from.
crt_session = crt_script.get_main_session()
# Run script's main logic against our session
try:
script_main(crt_session)
except Exception:
crt_session.end_cisco_session()
raise
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Get a simulated session object to pass into the script.
sim_session = direct_script.get_main_session()
# Run script's main logic against our session
script_main(sim_session)
# Shutdown logging after
logging.shutdown()
| {
"repo_name": "jamiecaesar/SecureCRT",
"path": "s_add_global_config.py",
"copies": "2",
"size": "8289",
"license": "apache-2.0",
"hash": -377276276363865340,
"line_mean": 48.0473372781,
"line_max": 119,
"alpha_frac": 0.6400048257,
"autogenerated": false,
"ratio": 4.308212058212058,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5948216883912059,
"avg_score": null,
"num_lines": null
} |
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
def import_network(name):
'''
Import an example network.
'''
import importlib
if name not in all_networks():
raise ImportError('nns: NN {} has not been defined!'.format(name))
netmod = importlib.import_module('.' + name, 'nn_dataflow.nns')
network = netmod.NN
return network
def all_networks():
'''
Get all defined networks.
'''
import os
nns_dir = os.path.dirname(os.path.abspath(__file__))
nns = [f[:-len('.py')] for f in os.listdir(nns_dir)
if f.endswith('.py') and not f.startswith('__')]
return list(sorted(nns))
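# A hedged usage sketch (not part of this module): discovering and importing a
# bundled network. The name 'alex_net' is an assumption; pick any entry that
# all_networks() actually returns.
# >>> all_networks()
# ['alex_net', ...]
# >>> network = import_network('alex_net')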
def add_lstm_cell(network, name, size, xin, cin=None, hin=None):
'''
Add a LSTM cell named `name` to the `network`, with the dimension `size`.
`xin`, `cin`, `hin` are the layers' names whose outputs are x_t, C_{t-1},
h_{t-1}, respectively. Return the layers' names whose outputs are C_t, h_t.
'''
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer, EltwiseLayer
if not isinstance(network, Network):
raise TypeError('add_lstm_cell: network must be a Network instance.')
if cin is None:
cin = '{}_cinit'.format(name)
network.add_ext(cin, InputLayer(size, 1))
if hin is None:
hin = '{}_hinit'.format(name)
network.add_ext(hin, InputLayer(size, 1))
if (cin not in network) or (hin not in network) or (xin not in network):
raise ValueError('add_lstm_cell: cin {}, hin {}, xin {} must all be '
'in the network.'.format(cin, hin, xin))
def gate_name(gate):
''' Name of a gate. '''
return '{}_{}gate'.format(name, gate)
# Candidate.
cand_name = '{}_cand'.format(name)
prevs = (hin, xin) if hin else (xin,)
network.add(cand_name, FCLayer(len(prevs) * size, size), prevs=prevs)
# Three gates.
prevs = (hin, xin) if hin else (xin,)
for g in ['i', 'f', 'o']:
network.add(gate_name(g), FCLayer(len(prevs) * size, size), prevs=prevs)
# C_t.
cout_name = '{}_cout'.format(name)
cout_f_name = cout_name + '_f'
prevs = (cin, gate_name('f')) if cin else (gate_name('f'),)
network.add(cout_f_name, EltwiseLayer(size, 1, len(prevs)), prevs=prevs)
cout_i_name = cout_name + '_i'
prevs = (cand_name, gate_name('i'))
network.add(cout_i_name, EltwiseLayer(size, 1, 2), prevs=prevs)
prevs = (cout_i_name, cout_f_name)
network.add(cout_name, EltwiseLayer(size, 1, 2), prevs=prevs)
# h_t.
hout_name = '{}_hout'.format(name)
prevs = (cout_name, gate_name('o'))
network.add(hout_name, EltwiseLayer(size, 1, 2), prevs=prevs)
return cout_name, hout_name
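# --- Hedged usage sketch (not part of this module): stacking two LSTM cells
# where the second cell consumes the first cell's hidden output. Constructor
# signatures are inferred from their use above; Network.INPUT_LAYER_KEY is
# assumed to name the input layer, so adjust if the real API differs.
def _example_two_cell_lstm(size=512):
    from nn_dataflow.core import Network, InputLayer
    network = Network('lstm_example')
    network.set_input_layer(InputLayer(size, 1))
    _, hout0 = add_lstm_cell(network, 'cell0', size,
                             xin=network.INPUT_LAYER_KEY)
    _, hout1 = add_lstm_cell(network, 'cell1', size, xin=hout0)
    return network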
| {
"repo_name": "stanford-mast/nn_dataflow",
"path": "nn_dataflow/nns/__init__.py",
"copies": "1",
"size": "3346",
"license": "bsd-3-clause",
"hash": -3860328478447068000,
"line_mean": 33.8541666667,
"line_max": 80,
"alpha_frac": 0.6338912134,
"autogenerated": false,
"ratio": 3.1154562383612663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4249347451761266,
"avg_score": null,
"num_lines": null
} |
""" $lic$
Copyright (c) 2016-2021, Mingyu Gao
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from contextlib import contextmanager
import matplotlib.backends.backend_pdf
import matplotlib.pyplot
from .format import paper_plot
def plot_setup(name, figsize=None, fontsize=9, font='paper'):
""" Setup a PDF page for plot.
name: PDF file name. If not ending with .pdf, will automatically append.
figsize: dimension of the plot in inches, should be an array of length two.
fontsize: fontsize for legends and labels.
font: font for legends and labels, 'paper' uses Times New Roman, 'default'
uses default, a tuple of (family, font, ...) customizes font.
"""
paper_plot(fontsize=fontsize, font=font)
if not name.endswith('.pdf'):
name += '.pdf'
pdfpage = matplotlib.backends.backend_pdf.PdfPages(name)
fig = matplotlib.pyplot.figure(figsize=figsize)
return pdfpage, fig
def plot_teardown(pdfpage, fig=None):
""" Tear down a PDF page after plotting.
pdfpage: PDF page.
fig: the figure to save.
"""
pdfpage.savefig(fig)
pdfpage.close()
@contextmanager
def plot_open(name, figsize=None, fontsize=9, font='paper'):
""" Open a context of PDF page for plot, used for the `with` statement.
name: PDF file name. If not ending with .pdf, will automatically append.
figsize: dimension of the plot in inches, should be an array of length two.
fontsize: fontsize for legends and labels.
font: font for legends and labels, 'paper' uses Times New Roman, 'default'
uses default, a tuple of (family, font, ...) customizes font.
"""
pdfpage, fig = plot_setup(name, figsize=figsize, fontsize=fontsize,
font=font)
yield fig
plot_teardown(pdfpage, fig)
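# --- Hedged usage sketch (not part of the original module): writing a simple
# line plot to 'example.pdf' via the context manager above. Uses only standard
# matplotlib Axes calls; the figure size is arbitrary.
def _example_plot():
    with plot_open('example', figsize=(4, 3)) as fig:
        ax = fig.add_subplot(111)
        ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
        ax.set_xlabel('x')
        ax.set_ylabel('x squared')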
| {
"repo_name": "gaomy3832/easypyplot",
"path": "easypyplot/pdf.py",
"copies": "1",
"size": "2251",
"license": "bsd-3-clause",
"hash": -5261756350559406000,
"line_mean": 35.3064516129,
"line_max": 79,
"alpha_frac": 0.7067969791,
"autogenerated": false,
"ratio": 3.828231292517007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953082381918776,
"avg_score": 0.016389177939646202,
"num_lines": 62
} |
"""
Create AII FreeIPA plugin
- creates the 'ipa aii' and 'ipa aii_find' commands
"""
import re
from ipalib import Command, Bool, Str
from ipalib.plugins.dns import add_records_for_host
from ipalib.errors import AlreadyActive, AlreadyInactive
class aii(Command): # class name has to be lowercase
"""aii command"""
takes_args = ('shorthostname', 'domain')
takes_options = (
Bool('install', default=False, required=False, autofill=True,
doc='Prepare for installation, returns random OTP',
),
Bool('disable', default=False, required=False, autofill=True,
doc='Disable the host in IPA',
),
Str('ip', default=None, required=False, autofill=True,
doc='Set the IP (implies DNS configuration; don\'t use it if DNS is not enabled/wanted)'
),
)
def __init__(self, *args, **kwargs):
"""Customise the __init__ a bit"""
super(aii, self).__init__(*args, **kwargs)
self.fqdns = None
def find_fqdns(self):
"""Update list of hosts"""
res = self.Command.host_find()
self.fqdns = {}
for host in res['result']:
for fqdn in host['fqdn']:
self.fqdns[fqdn] = host
self.log.debug('Found fqdns %s' % self.fqdns)
self.log.debug('Found fqdns %s' % ', '.join(self.fqdns.keys()))
return self.fqdns
def host_in_ipa(self, hostname, force_fqdn=False):
"""Check if hostname is known in IPA"""
if self.fqdns is None or force_fqdn:
self.find_fqdns()
res = hostname in self.fqdns
if res:
host = self.fqdns[hostname]
self.log.debug('host %s found in IPA (has_password %s; has_keytab %s)' % (
hostname, host['has_password'], host['has_keytab']))
else:
self.log.debug('host %s NOT found in IPA' % (hostname))
return res
def disable_host(self, hostname):
"""Disable the host (removes keytab)"""
res = {}
if self.host_in_ipa(hostname):
try:
disable = api.Command.host_disable(hostname)
res['disable'] = disable['result']
self.log.debug('host_disable on %s OK.' % hostname)
except AlreadyInactive:
self.log.debug('Host %s already inactive.' % hostname)
else:
            self.log.debug('No need to disable unknown host %s.' % hostname)
self.log.info('Host %s disabled.' % hostname)
return res
def aii_install(self, hostname):
"""Take action to allow proper installation"""
res = {}
do_add = True
if self.host_in_ipa(hostname):
host = self.fqdns[hostname]
if host['has_keytab']:
self.log.error('Can\'t install host %s, already in IPA (disable first?)' % hostname)
raise AlreadyActive
else:
self.log.debug('Host %s in IPA, but no keytab' % hostname)
do_add = False
if do_add:
self.log.debug('host_add %s' % hostname)
added = api.Command.host_add(hostname)
res['add'] = added['result']
# modify to set random password
self.log.debug('host_mod %s random password' % hostname)
# do not print/log res, it contains a password
modified = api.Command.host_mod(hostname, random=True)
res['modify'] = modified['result']
return res
def run(self, shorthostname, domain, **options):
"""
Implemented as frontend command (ie no forward/execute)
"""
hostname = unicode("%s.%s" % (shorthostname, domain))
self.log.debug('AII called with hostname %s (options %s)' % (hostname, options))
ip = options.get('ip', None)
res = {}
# first try to disable (e.g. in case --install=1 --disable=1 is passed)
if options.get('disable', False):
self.log.debug('Going to disable')
res.update(self.disable_host(hostname))
# check for install
if options.get('install', False):
self.log.debug('Going to install')
if ip is not None:
self.log.debug('Adding ip %s for hostname %s' % (ip, hostname))
add_records_for_host(shorthostname, domain, [ip])
# do not print/log res, it contains a password
res.update(self.aii_install(hostname))
# always return like this
return dict(result=res)
def output_for_cli(self, textui, result, shorthostname, domain, **options):
if options.get('install', False) and 'modify' in result['result'] and 'randompassword' in result['result']['modify']:
# use pop to remove it (eg in case we use it for logging)
textui.print_plain('randompassword = %s' % (result['result']['modify'].pop('randompassword')))
textui.print_plain('%s.%s = %r (options %s)' % (shorthostname, domain, result, options))
class aii_find(Command): # class name has to be lowercase
"""aii_find command"""
takes_options = (
Bool('detail', default=False, required=False, autofill=True,
doc='Show details',
),
Bool('all', default=False, required=False, autofill=True,
doc='Use --all option (implies detail)',
),
Bool('raw', default=False, required=False, autofill=True,
doc='Use --all --raw option (implies detail)',
),
Str('hostname', default=None, required=False, autofill=True,
doc='Check this host (ignores hostregex)',
),
Str('hostregex', default=None, required=False, autofill=True,
doc='Show host(s) matching this regex (might be slow)',
),
)
def run(self, **options):
"""
Show all hosts with no keytab, filtered with hostregex
Implemented as frontend command (ie no forward/execute)
"""
opts = {}
opts['raw'] = options.get('raw', False)
opts['all'] = options.get('all', False) or opts['raw']
self.log.debug('Options all %s raw %s' % (opts['all'], opts['raw']))
reg = None
        # With autofill enabled these keys are always present, so test the values rather than key membership
        # to avoid matching a None default.
        if options.get('hostname'):
            opts['fqdn'] = options['hostname']
            self.log.debug('Set hostname %s' % opts['fqdn'])
        elif options.get('hostregex'):
            reg = re.compile(options['hostregex'])
            self.log.debug('Using regexp pattern %s' % reg.pattern)
found = self.Command.host_find(**opts)
res = {
'fqdns': [],
'details' : {}
}
detail = options.get('detail', False) or opts['all'] # already deals with raw
if 'result' in found and len(found['result']):
for host in found['result']:
fqdns = host.pop('fqdn') # this is a tuple!
self.log.debug('host fqdns found %s ' % (fqdns))
for fqdn in fqdns:
if (reg is not None) and (not reg.search(fqdn)):
continue
res['fqdns'].append(fqdn)
if detail:
res['details'][fqdn] = host
else:
self.log.debug('No results from host_find')
# sort the hostnames before returning them
res['fqdns'].sort()
return dict(result=res)
def output_for_cli(self, textui, result, **options):
detail = options.get('detail', False) or options.get('all', False) or options.get('raw', False)
fqdns = result['result']['fqdns']
if detail:
# print per host details
for fqdn in fqdns:
textui.print_plain("Hostname %s" % (fqdn))
details = result['result']['details'][fqdn].items()
details.sort(key=lambda x: x[0])
textui.print_keyval(details)
else:
# print list of hostnames
textui.print_plain(" ".join(fqdns))
if __name__ == '__main__':
from ipalib import create_api
api = create_api()
else:
from ipalib import api
api.register(aii)
api.register(aii_find)
if __name__ == '__main__':
api.finalize()
from ipalib import cli
textui = cli.textui()
args = [unicode('somehost'), unicode('somesubdomain.somedomain')]
options = {
'install': True,
}
result = api.Command.aii(*args, **options)
api.Command.aii.output_for_cli(textui, result, *args, **options)
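    # Hedged example (not in the original): exercising the aii_find command in
    # the same fashion as the aii invocation above; the regex value is arbitrary.
    find_options = {'hostregex': unicode('^somehost'), 'detail': True}
    find_result = api.Command.aii_find(**find_options)
    api.Command.aii_find.output_for_cli(textui, find_result, **find_options)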
| {
"repo_name": "kwaegema/aii",
"path": "aii-freeipa/src/main/python/aii.py",
"copies": "3",
"size": "8562",
"license": "apache-2.0",
"hash": 8055869638687578000,
"line_mean": 34.3801652893,
"line_max": 125,
"alpha_frac": 0.5568792338,
"autogenerated": false,
"ratio": 3.860234445446348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5917113679246349,
"avg_score": null,
"num_lines": null
} |
###${MARKDOWN}
# This tutorial is for a basic architecture search using a random searcher.
#
# If running this tutorial with an MPI based communicator, use the
# [launch_mpi_based_search.sh](examples/tutorials/xx.full_search/launch_mpi_based_search.sh)
# script to run the search. Run the script as
# `./examples/tutorials/xx.full_search/launch_mpi_based_search.sh NP` where NP
# is the number of evaluator workers + 1 (to account for the searcher process)
#
# If running this tutorial with a file based communicator, use the
# [launch_file_based_search.sh](examples/tutorials/xx.full_search/launch_file_based_search.sh)
# script to run the search. Run the script as
# `./examples/tutorials/xx.full_search/launch_file_based_search.sh NP`
import argparse
from deep_architect.contrib.misc.datasets.loaders import load_mnist
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
from deep_architect.searchers.random import RandomSearcher
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator
import deep_architect.modules as mo
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as dnn
import deep_architect.searchers.common as se
from deep_architect.contrib.communicators.communicator import get_communicator
parser = argparse.ArgumentParser(description='Run an architecture search.')
parser.add_argument('--comm',
'-c',
choices=['mpi', 'file'],
required=True,
default='mpi')
parser.add_argument('--num-procs', '-n', type=int, default=2)
args = parser.parse_args()
# First, create the communicator. This communicator is used by the master to
# send candidate architectures to the workers to evaluate, and by the workers
# to send back the results for the architectures they evaluated. Currently,
# the communicator can be MPI based or file based (file based requires the
# processes to share a filesystem).
comm = get_communicator(args.comm, num_procs=args.num_procs)
# This is the number of total models to be evaluated in search
num_total_models = 25
# Now we set up the datasets and the search space factory.
X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',
normalize_range=True)
train_dataset = InMemoryDataset(X_train, y_train, True)
val_dataset = InMemoryDataset(X_val, y_val, False)
ssf = mo.SearchSpaceFactory(lambda: dnn.dnn_net(10))
# Each process should have a unique rank. The process with rank 0 will act as the
# master process that is in charge of the searcher. Every other process acts
# as a worker that evaluates architectures sent to them.
if comm.get_rank() == 0:
searcher = RandomSearcher(ssf.get_search_space)
models_sampled = 0
killed = 0
finished = 0
# This process keeps going as long as we have not received results for all sampled
# models and not all the worker processes have been killed. Kill signals start
# being sent out once the searcher has finished sampling the number of models
# specified by the `num_total_models` parameter
while finished < models_sampled or killed < comm.num_workers:
if models_sampled < num_total_models:
# Now, we check the communicator to see if worker queue is ready for a new
# architecture. If so, we publish an architecture to the worker queue.
if comm.is_ready_to_publish_architecture():
_, _, vs, se_token = searcher.sample()
comm.publish_architecture_to_worker(vs, models_sampled,
se_token)
models_sampled += 1
# If we are over the specified number of models to be sampled, we
# send a kill signal to each worker. Each worker should only consume
# one kill signal, so if the number of kill signals the searcher
# sends is equal to the number of workers, all workers should have
# received a kill signal
else:
            if comm.is_ready_to_publish_architecture() and killed < comm.num_workers:
                comm.kill_worker()
                killed += 1
# After sending the appropriate messages to the workers, the master process
# needs to go through each worker and see if it has received any new results to
# update the searcher with
for worker in range(comm.num_workers):
msg = comm.receive_results_in_master(worker)
if msg is not None:
results, model_id, searcher_eval_token = msg
searcher.update(results['validation_accuracy'],
searcher_eval_token)
finished += 1
print('Model %d accuracy: %f' %
(model_id, results['validation_accuracy']))
# At this point, all of the workers should be killed, and the searcher should
# have evaluated all the architectures it needed to finish its search.
# print('Best architecture accuracy: %f' % searcher.best_acc)
# print('Best architecture params: %r' % searcher.best_vs)
else:
evaluator = SimpleClassifierEvaluator(train_dataset,
val_dataset,
10,
'./temp',
max_num_training_epochs=2)
# This process keeps going until it receives a kill signal from the master
# process. At that point, it breaks out of its loop and ends.
while (True):
arch = comm.receive_architecture_in_worker()
if arch is None:
break
vs, evaluation_id, searcher_eval_token = arch
# In order to evaluate the architecture sent by the searcher, we create a new
# unspecified search space, and recreate the architecture using the values of
# the hyperparameters received by the worker.
inputs, outputs = ssf.get_search_space()
se.specify(outputs, vs)
results = evaluator.eval(inputs, outputs)
comm.publish_results_to_master(results, evaluation_id,
searcher_eval_token)
| {
"repo_name": "negrinho/deep_architect",
"path": "dev/tutorials/full_search/search.py",
"copies": "1",
"size": "6160",
"license": "mit",
"hash": -6946065070931441000,
"line_mean": 46.0229007634,
"line_max": 102,
"alpha_frac": 0.6660714286,
"autogenerated": false,
"ratio": 4.134228187919463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013061553715722658,
"num_lines": 131
} |
$[/myProject/wsadmin_scripts/preamble.py]
clusterName = '''
$[wasClusterName]
'''.strip()
clusterMembers = '''
$[wasClusterMembers]
'''.strip()
syncNodes = '''
$[wasSyncNodes]
'''.strip()
if not isClusterExists(clusterName):
bailOut("Cluster %s does not exist" % (clusterName))
sys.exit(1)
# Cluster exists, continue
parsedMembersList = parseServerListAsList(clusterMembers, {'filterUnique': 1})
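# Hedged note (not in the original template): based on how the result is
# consumed below, parseServerListAsList is assumed to turn a string like
# 'Node01:server1,Node02:server2' into a list of dicts such as
# [{'Node': 'Node01', 'Server': 'server1'}, {'Node': 'Node02', 'Server': 'server2'}].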
# check members
clusterMembers = getClusterMembersAsList(clusterName);
errors = 0
for member in parsedMembersList:
if member not in clusterMembers:
exists = 'exists'
errors = 1
if not isServerExists(member['Node'], member['Server']):
exists = 'does not exist'
logError("Server %s:%s (%s) is not a member of cluster %s, please, check your input" % (member['Node'], member['Server'], exists, clusterName))
if errors > 0:
sys.exit(1)
for member in parsedMembersList:
try:
AdminClusterManagement.deleteClusterMember(clusterName, member['Node'], member['Server'])
logSummary("Cluster member %s on node %s has been removed from cluster %s and deleted" % (member['Server'], member['Node'], clusterName))
except:
forwardException(getExceptionMsg())
sys.exit(1)
AdminConfig.save()
if toBoolean(syncNodes):
syncActiveNodes()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/Cluster/remove_cluster_members.py",
"copies": "1",
"size": "1335",
"license": "apache-2.0",
"hash": 5390117786280776000,
"line_mean": 28.6666666667,
"line_max": 151,
"alpha_frac": 0.6764044944,
"autogenerated": false,
"ratio": 3.7083333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4884737827733333,
"avg_score": null,
"num_lines": null
} |
$[/myProject/wsadmin_scripts/preamble.py]
clusterName = '''
$[wasClusterName]
'''.strip()
genUniquePorts = '''
$[wasClusterMembersGenUniquePorts]
'''.strip()
clusterMemberWeight = '''
$[wasClusterMemberWeight]
'''.strip()
membersList = '''
$[wasClusterMembersList]
'''.strip()
syncNodes = '''
$[wasSyncNodes]
'''.strip()
# Check if cluster is empty. Cluster members cannot be created if no first cluster member is present.
if not isClusterExists(clusterName):
bailOut("Cluster %s does not exist" % (clusterName))
if len(getClusterMembers(clusterName)) < 1:
bailOut("Can't add cluster members to empty cluster %s. Please, create first cluster member and try again" % (clusterName))
parsedMembersList = []
try:
parsedMembersList = parseServerListAsList(membersList, {'filterUnique': 1})
except:
forwardException(getExceptionMsg())
sys.exit(1)
for server in parsedMembersList:
try:
createMemberParams = {
'clusterName': clusterName,
'targetNode': server['Node'],
'targetName': server['Server'],
'memberWeight': clusterMemberWeight,
'genUniquePorts': genUniquePorts
}
createClusterMembers(createMemberParams)
except:
forwardException(getExceptionMsg())
sys.exit(1)
logSummary("Server %s on node %s has been created and added to %s cluster" % (server['Server'], server['Node'], clusterName))
AdminConfig.save()
if toBoolean(syncNodes):
syncActiveNodes()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/Cluster/create_cluster_members.py",
"copies": "1",
"size": "1496",
"license": "apache-2.0",
"hash": -6887610253830054000,
"line_mean": 26.2,
"line_max": 129,
"alpha_frac": 0.6831550802,
"autogenerated": false,
"ratio": 3.7493734335839597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493252851378396,
"avg_score": null,
"num_lines": null
} |
$[/myProject/wsadmin_scripts/preamble.py]
clusterName = '''
$[wasClusterName]
'''.strip()
preferLocal = '''
$[wasPreferLocal]
'''.strip()
createFirstMember = '''
$[wasCreateFirstClusterMember]
'''.strip()
firstMemberNode = '''
$[wasFirstClusterMemberNode]
'''.strip()
firstMemberName = '''
$[wasFirstClusterMemberName]
'''.strip()
firstMemberWeight = '''
$[wasFirstClusterMemberWeight]
'''.strip()
firstMemberGenUniquePorts = '''
$[wasFirstClusterMemberGenUniquePorts]
'''.strip()
promotionPolicy = '''
$[wasServerResourcesPromotionPolicy]
'''.strip()
creationPolicy = '''
$[wasFirstClusterMemberCreationPolicy]
'''.strip()
templateName = '''
$[wasFirstClusterMemberTemplateName]
'''.strip()
sourceServerName = '''
$[wasSourceServerName]
'''.strip()
addClusterMembers = '''
$[wasAddClusterMembers]
'''.strip()
membersGenUniquePorts = '''
$[wasClusterMembersGenUniquePorts]
'''.strip()
memberWeight = '''
$[wasClusterMemberWeight]
'''.strip()
membersList = '''
$[wasClusterMembersList]
'''.strip()
syncNodes = '''
$[wasSyncNodes]
'''.strip()
# 1. parameters check
# Checking all parameters if create 1st member is set
if toBoolean(createFirstMember):
if not creationPolicy:
bailOut("Creation Policy is mandatory when create 1st cluster member is chosen")
if creationPolicy not in ['existing', 'convert', 'template']:
bailOut("Creation policy should be one of: existing, convert or template. Got %s" % (creationPolicy))
if creationPolicy in ['template', 'existing'] and (not firstMemberName or not firstMemberNode):
bailOut("First Member Name and First Member Node should be provided when create 1st cluster member is chosen")
if not promotionPolicy:
bailOut("Promotion Policy is mandatory when create first cluster member is chosen");
if promotionPolicy and promotionPolicy not in ['cluster', 'server', 'both']:
bailOut("Promotion policy should be cluster, server, or both, got %s" % (promotionPolicy))
if creationPolicy == 'existing' and not sourceServerName:
bailOut("Source Server Name is mandatory when Creation Policy is set to existing")
if creationPolicy == 'template' and not templateName:
bailOut("First Server Template Name is mandatory when Creation Policy is set to template")
# 2. Create cluster
if toBoolean(preferLocal):
preferLocal = 'true'
else:
preferLocal = 'false'
parsedMembersList = []
if toBoolean(addClusterMembers):
if not membersList:
bailOut("No members to add.")
try:
parsedMembersList = parseServerListAsList(membersList, {'filterUnique': 1})
except:
forwardException(getExceptionMsg())
sys.exit(1)
clusterConfig = "[-clusterName '%s' -preferLocal '%s' -clusterType APPLICATION_SERVER]" % (clusterName, preferLocal)
clusterCreationParams = [
'-clusterConfig', clusterConfig
]
# Now we add parameters if we want to convert an existing server into the first cluster member.
okConvertedLog = ''
if toBoolean(createFirstMember) and creationPolicy == 'convert':
nodeServer = splitNodeServer(sourceServerName)
clusterCreationParams.append('-convertServer')
convertParams = "[-serverName '%s' -serverNode '%s' -resourcesScope '%s']" % (nodeServer['Server'], nodeServer['Node'], promotionPolicy)
clusterCreationParams.append(convertParams)
okConvertedLog = "Server %s on node %s has been converted to be the first member of cluster %s" % (nodeServer['Server'], nodeServer['Node'], clusterName)
print clusterCreationParams
try:
AdminTask.createCluster(clusterCreationParams)
except:
forwardException(getExceptionMsg())
sys.exit(1)
logSuccessSummary("Cluster %s has been created" % (clusterName))
if creationPolicy == 'convert':
logSuccessSummary(okConvertedLog)
# 3. Add first cluster member
# TODO: Add weights
if toBoolean(createFirstMember) and creationPolicy in ['template', 'existing']:
logInfo("Creating first member from template or existing server")
try:
createFirstMemberParams = {
'clusterName' : clusterName,
'creationPolicy': creationPolicy,
'targetNode' : firstMemberNode,
'targetServer' : firstMemberName,
'templateName' : templateName,
'memberWeight' : firstMemberWeight,
'resourcesScope': promotionPolicy,
}
if toBoolean(firstMemberGenUniquePorts):
createFirstMemberParams['genUniquePorts'] = 'true'
else:
createFirstMemberParams['genUniquePorts'] = 'false'
if creationPolicy == 'existing':
nodeServer = splitNodeServer(sourceServerName)
createFirstMemberParams['sourceNode'] = nodeServer['Node']
createFirstMemberParams['sourceServer'] = nodeServer['Server']
createFirstClusterMember(createFirstMemberParams)
except:
forwardException(getExceptionMsg())
sys.exit(1)
if creationPolicy == 'template':
logSuccessSummary("First cluster member %s has been created on node %s from template %s" % (firstMemberName, firstMemberNode, templateName))
else:
logLine = "First cluster member %s has been created on node %s using server %s on node %s as source" \
% (firstMemberName, firstMemberNode, nodeServer['Server'], nodeServer['Node'])
logSuccessSummary(logLine)
# 4. Add cluster members
if toBoolean(addClusterMembers):
for server in parsedMembersList:
try:
createMemberParams = {
'clusterName': clusterName,
'targetNode': server['Node'],
'targetName': server['Server'],
'memberWeight': memberWeight,
'genUniquePorts': membersGenUniquePorts
}
# if toBooleanString(membersGenUniquePorts) in ['true', 'false']:
# createMemberParams['genUniquePorts'] = toBooleanString(membersGenUniquePorts)
createClusterMembers(createMemberParams)
except:
forwardException(getExceptionMsg())
sys.exit(1)
logSuccessSummary("Server %s on node %s has been created and added as cluster member" % (server['Server'], server['Node']))
AdminConfig.save()
if toBoolean(syncNodes):
syncActiveNodes()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/Cluster/create_cluster.py",
"copies": "1",
"size": "6277",
"license": "apache-2.0",
"hash": -4054412067017351000,
"line_mean": 32.747311828,
"line_max": 157,
"alpha_frac": 0.6888641071,
"autogenerated": false,
"ratio": 4.039253539253539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228117646353538,
"avg_score": null,
"num_lines": null
} |
$[/myProject/wsadmin_scripts/preamble.py]
scope = '''
$[/myJobStep/tmpl/factoryScope]
'''.strip()
factoryType = '''
$[/myJobStep/tmpl/factoryType]
'''.strip()
# this parameter is responsible for flow
api = '''
$[/myJobStep/tmpl/wasApi]
'''.strip()
requestParams = '''
$[/myJobStep/tmpl/requestParameters]
'''.strip()
editParams = '''
$[/myJobStep/tmpl/editParameters]
'''.strip()
resName = '''
$[factoryAdministrativeName]
'''.strip()
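# Hedged illustration (not in the original template): the expanded
# $[/myJobStep/tmpl/requestParameters] value is expected to be a wsadmin
# parameter string for AdminTask.createWMQConnectionFactory, e.g.
# "-name MyCF -jndiName jms/MyCF -type CF -qmgrName QM1 -qmgrHostname mq.example.com"
# (flag names per the WMQ messaging provider admin commands; values made up).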
mode = "create";
### some logic to switch to edit mode if required goes here:
editResourceScope = isResourceExists(scope, api, resName, factoryType)
if editResourceScope:
print "Raw resource scope: %s" %(editResourceScope)
mode = "edit"
### end of logic
print "Operation mode: %s" % (mode);
scopeResult = AdminConfig.getid(scope)
print scopeResult
actionResult = None
if mode == 'create':
print "Creating WMQ JMS Connection Factory"
actionResult = AdminTask.createWMQConnectionFactory(
scopeResult,
[requestParams]
)
print "Status: OK, Message: WMQ JMS Connection Factory %s has been created" % (resName)
elif mode == 'edit':
print "Edit WMQ JMS Connection Factory"
actionResult = AdminTask.modifyWMQConnectionFactory(
editResourceScope,
[editParams]
)
print "Status: OK, Message: WMQ JMS Connection Factory %s has been updated" % (resName)
else:
print "unknown action"
sys.exit(1)
AdminConfig.save()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/JMS/create_or_update_wmq_jms_connection_factory.py",
"copies": "1",
"size": "1427",
"license": "apache-2.0",
"hash": 6992209663415995000,
"line_mean": 22.7833333333,
"line_max": 91,
"alpha_frac": 0.6895585144,
"autogenerated": false,
"ratio": 3.365566037735849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45551245521358485,
"avg_score": null,
"num_lines": null
} |
$[/myProject/wsadmin_scripts/preamble.py]
scope = '''
$[/myJobStep/tmpl/factoryScope]
'''.strip()
# this parameter is responsible for flow
api = '''
$[/myJobStep/tmpl/wasApi]
'''.strip()
requestParams = '''
$[/myJobStep/tmpl/requestParameters]
'''.strip()
editParams = '''
$[/myJobStep/tmpl/editParameters]
'''.strip()
resName = '''
$[factoryAdministrativeName]
'''.strip()
mode = "create";
### some logic to switch to edit mode if required goes here:
editResourceScope = isResourceExists(scope, api, resName)
if editResourceScope:
print "Raw resource scope: %s" %(editResourceScope)
mode = "edit"
### end of logic
print "Operation mode: %s" % (mode);
scopeResult = AdminConfig.getid(scope)
print scopeResult
actionResult = None
if mode == 'create':
print "Creating SIB JMS Connection Factory"
actionResult = AdminTask.createSIBJMSConnectionFactory(
scopeResult,
[requestParams]
)
print "Status: OK, Message: SIB JMS Connection Factory %s has been created" % (resName)
elif mode == 'edit':
print "Edit SIB JMS Connection Factory"
actionResult = AdminTask.modifySIBJMSConnectionFactory(
editResourceScope,
[editParams]
)
print "Status: OK, Message: SIB JMS Connection Factory %s has been updated" % (resName)
else:
print "unknown action"
sys.exit(1)
AdminConfig.save()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/JMS/create_or_update_sib_jms_connection_factory.py",
"copies": "1",
"size": "1359",
"license": "apache-2.0",
"hash": -4887193719842915000,
"line_mean": 22.8421052632,
"line_max": 91,
"alpha_frac": 0.6909492274,
"autogenerated": false,
"ratio": 3.372208436724566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9545555576250405,
"avg_score": 0.0035204175748321533,
"num_lines": 57
} |
$[/myProject/wsadmin_scripts/preamble.py]
scope = '''
$[/myJobStep/tmpl/queueScope]
'''.strip()
# this parameter is responsible for flow
api = '''
$[/myJobStep/tmpl/wasApi]
'''.strip()
requestParams = '''
$[/myJobStep/tmpl/requestParameters]
'''.strip()
resName = '''
$[queueAdministrativeName]
'''.strip()
mode = "create";
### some logic to switch to edit mode if required goes here:
editResourceScope = isResourceExists(scope, api, resName)
if editResourceScope:
print "Raw resource scope: %s" %(editResourceScope)
mode = "edit"
### end of logic
print "Operation mode: %s" % (mode);
scopeResult = AdminConfig.getid(scope)
print scopeResult
actionResult = None
if api == 'WMQ_Queue' and mode == 'create':
print "Creating WMQ Queue"
actionResult = AdminTask.createWMQQueue(
scopeResult,
[requestParams]
)
print "Status: OK, Message: WMQ JMS Queue %s has been created" % (resName)
elif api == 'WMQ_Queue' and mode == 'edit':
print "Editing WMQ_Queue"
actionResult = AdminTask.modifyWMQQueue(
editResourceScope,
[requestParams]
)
print "Status: OK, Message: WMQ JMS Queue %s has been updated" % (resName)
elif api == 'SIB_Queue' and mode == 'create':
print "Creating SIB Queue"
actionResult = AdminTask.createSIBJMSQueue(
scopeResult,
[requestParams]
)
print "Status: OK, Message: SIB JMS Queue %s has been created" % (resName)
elif api == 'SIB_Queue' and mode == 'edit':
print "Editing SIB_Queue"
actionResult = AdminTask.modifySIBJMSQueue(
editResourceScope,
[requestParams]
)
print "Status: OK, Message: SIB JMS Queue %s has been updated" % (resName)
else:
print "unknown action"
sys.exit(1)
AdminConfig.save()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/JMS/create_or_update_jms_queue.py",
"copies": "1",
"size": "1767",
"license": "apache-2.0",
"hash": -2112040736213038300,
"line_mean": 24.9852941176,
"line_max": 78,
"alpha_frac": 0.6644029428,
"autogenerated": false,
"ratio": 3.2843866171003717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9435633334067235,
"avg_score": 0.002631245166627207,
"num_lines": 68
} |
$[/myProject/wsadmin_scripts/preamble.py]
scope = '''
$[/myJobStep/tmpl/specScope]
'''.strip()
# this parameter identifies the resource type for the existence check below
api = '''
$[/myJobStep/tmpl/wasApi]
'''.strip()
requestParams = '''
$[/myJobStep/tmpl/requestParameters]
'''.strip()
resName = '''
$[specAdministrativeName]
'''.strip()
mode = "create";
### some logic to switch to edit mode if required goes here:
editResourceScope = isResourceExists(scope, api, resName)
if editResourceScope:
print "Raw resource scope: %s" %(editResourceScope)
mode = "edit"
### end of logic
print "Operation mode: %s" % (mode);
scopeResult = AdminConfig.getid(scope)
print scopeResult
actionResult = None
if mode == 'create':
print "Creating SIB JMS ActivationSpec"
actionResult = AdminTask.createSIBJMSActivationSpec(
scopeResult,
[requestParams]
)
print "Status: OK, Message: SIB JMS Activation Spec %s has been created" % (resName)
elif mode == 'edit':
print "Edit SIB JMS ActivationSpec"
actionResult = AdminTask.modifySIBJMSActivationSpec(
editResourceScope,
[requestParams]
)
print "Status: OK, Message: SIB JMS Activation Spec %s has been updated" % (resName)
else:
print "unknown action"
sys.exit(1)
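# wsadmin stages configuration changes in a temporary workspace;
# AdminConfig.save() commits them to the master configuration repository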
AdminConfig.save()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/JMS/create_or_update_sib_jms_activation_spec.py",
"copies": "1",
"size": "1272",
"license": "apache-2.0",
"hash": -8855119619596951000,
"line_mean": 23,
"line_max": 88,
"alpha_frac": 0.6886792453,
"autogenerated": false,
"ratio": 3.3473684210526318,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45360476663526317,
"avg_score": null,
"num_lines": null
} |
$[/myProject/wsadmin_scripts/preamble.py]
scope = '''
$[/myJobStep/tmpl/topicScope]
'''.strip()
# this parameter selects which wsadmin API flow is used (WMQ vs. SIB)
api = '''
$[/myJobStep/tmpl/wasApi]
'''.strip()
requestParams = '''
$[/myJobStep/tmpl/requestParameters]
'''.strip()
resName = '''
$[topicAdministrativeName]
'''.strip()
mode = "create";
### some logic to switch to edit mode if required goes here:
editResourceScope = isResourceExists(scope, api, resName)
if editResourceScope:
print "Raw resource scope: %s" %(editResourceScope)
mode = "edit"
### end of logic
print "Operation mode: %s" % (mode);
scopeResult = AdminConfig.getid(scope)
print scopeResult
actionResult = None
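# requestParams holds the template-expanded '-option value ...' string;
# passing it wrapped in a one-element list is the calling convention used
# throughout these scripts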
if api == 'WMQ_Topic' and mode == 'create':
print "Creating WMQ Topic"
actionResult = AdminTask.createWMQTopic(
scopeResult,
[requestParams]
)
print "Status: OK, Message: WMQ JMS Topic %s has been created" % (resName)
elif api == 'WMQ_Topic' and mode == 'edit':
print "Editing WMQ_Topic"
actionResult = AdminTask.modifyWMQTopic(
editResourceScope,
[requestParams]
)
print "Status: OK, Message: WMQ JMS Topic %s has been updated" % (resName)
elif api == 'SIB_Topic' and mode == 'create':
print "Creating SIB Topic"
actionResult = AdminTask.createSIBJMSTopic(
scopeResult,
[requestParams]
)
print "Status: OK, Message: SIB JMS Topic %s has been created" % (resName)
elif api == 'SIB_Topic' and mode == 'edit':
print "Editing SIB_Topic"
actionResult = AdminTask.modifySIBJMSTopic(
editResourceScope,
[requestParams]
)
print "Status: OK, Message: SIB JMS Topic %s has been updated" % (resName)
else:
print "unknown action"
sys.exit(1)
AdminConfig.save()
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/JMS/create_or_update_jms_topic.py",
"copies": "1",
"size": "1767",
"license": "apache-2.0",
"hash": -7928497842262585000,
"line_mean": 24.9852941176,
"line_max": 78,
"alpha_frac": 0.6644029428,
"autogenerated": false,
"ratio": 3.2601476014760147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9411394318442878,
"avg_score": 0.002631245166627207,
"num_lines": 68
} |
$[/myProject/wsadmin_scripts/preamble.py]
# Jython script to start application servers.
# properties retrieval section
serversList = '''
$[wasServersList]
'''.strip()
waitTime = '''
$[wasWaitTime]
'''.strip()
# Mandatory parameters validation.
if not serversList:
logError("Missing servers list to be started")
sys.exit(1)
# values defined for this script
waitTime = uintOrZero(waitTime)
okServerStatus = 'STARTED'
sleepTime = 5
iterationsCount = waitTime / sleepTime
iterationsCount += 1
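# poll every sleepTime seconds for up to waitTime seconds; the +1 above
# guarantees at least one status check even when waitTime is 0
# (integer division truncates)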
parsedServerList = parseServerListAsList(serversList, {"expandStar": 1})
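# parseServerListAsList() (a preamble helper) is assumed to expand '*'
# entries into concrete Node/Server pairs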
# check server states section
for x in range(len(parsedServerList)):
server = parsedServerList[x]
serverStatus = ''
try:
serverStatus = showServerStatus(server['Node'], server['Server'])
except:
logSummary("Failed to check status of %s on node %s" % (server[Server], server['Node']))
forwardException(getExceptionMsg())
sys.exit(1)
if serverStatus == okServerStatus:
logWarning("Server %s on Node %s is already %s" % (server['Server'], server['Node'], okServerStatus))
parsedServerList[x] = 0
elif serverStatus == 'UNKNOWN!':
logError("Failed to start server %s on node %s" % (server['Server'], server['Node']))
sys.exit(1)
print serverStatus
parsedServerList = filter(lambda x: x, parsedServerList)
if len(parsedServerList) == 0:
logWarning("Nothing to do, all servers are already %s" % (okServerStatus))
logSummary("All servers are already %s" % (okServerStatus))
os._exit(0)
# starting the servers
for server in parsedServerList:
params = '[-serverName "%s" -nodeName "%s"]' % (server['Server'], server['Node'])
try:
if not is_8_0_0(server['Node']):
result = AdminTask.startMiddlewareServer(params)
print "Server start result: ", result
else:
print "Falling back to 8.0.0 mode"
result = startApplicationServer8_0_0(server['Node'], server['Server'])
print "Server start result: ", result
except:
forwardException(getExceptionMsg())
logSummary("Failed to start server %s on node %s" % (server['Server'], server['Node']))
sys.exit(1)
startResults = []
# now we're checking that servers are started
startedServers = 0
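# re-check each remaining server until it reaches okServerStatus or the
# wait budget is exhausted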
for i in range(iterationsCount):
for server in parsedServerList:
        if 'State' in server and server['State'] == okServerStatus:
continue
serverStatus = ''
try:
serverStatus = showServerStatus(server['Node'], server['Server'])
except:
logSummary("Failed to check state of server %s on node %s" % (server['Server'], server['Node']))
forwardException(getExceptionMsg())
sys.exit(1)
        # record the latest observed state for the final summary
server['State'] = serverStatus
if serverStatus == okServerStatus:
startedServers += 1
print serverStatus
if startedServers >= len(parsedServerList):
break
time.sleep(sleepTime)
# handle procedure results
print "Procedure result:\n"
for server in parsedServerList:
logSummary("Node: %s, Server: %s, State: %s" % (server['Node'], server['Server'], server['State']))
if startedServers != len(parsedServerList):
logError("Some servers are failed to start")
sys.exit(1)
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/Server/start_application_servers.py",
"copies": "1",
"size": "3379",
"license": "apache-2.0",
"hash": -1481921765869631200,
"line_mean": 32.1274509804,
"line_max": 109,
"alpha_frac": 0.6507842557,
"autogenerated": false,
"ratio": 3.8794489092996556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030233164999656,
"avg_score": null,
"num_lines": null
} |
$[/myProject/wsadmin_scripts/preamble.py]
# Jython script to stop application servers.
# properties retrieval section
serversList = '''
$[wasServersList]
'''.strip()
waitTime = '''
$[wasWaitTime]
'''.strip()
# Mandatory parameters validation.
if not serversList:
logError("Missing servers list to be stopped")
sys.exit(1)
# values defined for this script
waitTime = uintOrZero(waitTime)
okServerStatus = 'Stopped'
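# NOTE: the target state string is 'Stopped' here but 'STARTED' in the
# start script; both are assumed to match what showServerStatus() reports
# for the respective state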
sleepTime = 5
iterationsCount = waitTime / sleepTime
iterationsCount += 1
parsedServerList = parseServerListAsList(serversList, {"expandStar": 1})
# check server states section
for x in range(len(parsedServerList)):
server = parsedServerList[x]
serverStatus = ''
try:
serverStatus = showServerStatus(server['Node'], server['Server'])
except:
logSummary("Failed to check status of %s on node %s" % (server[Server], server['Node']))
forwardException(getExceptionMsg())
sys.exit(1)
if serverStatus == okServerStatus:
logWarning("Server %s on Node %s is already %s" % (server['Server'], server['Node'], okServerStatus))
parsedServerList[x] = 0
elif serverStatus == 'UNKNOWN!':
logError("Failed to stop server %s on node %s" % (server['Server'], server['Node']))
sys.exit(1)
print serverStatus
parsedServerList = filter(lambda x: x, parsedServerList)
if len(parsedServerList) == 0:
logWarning("Nothing to do, all servers are already %s" % (okServerStatus))
logSummary("All servers are already %s" % (okServerStatus))
os._exit(0)
# stopping the servers
for server in parsedServerList:
params = '[-serverName "%s" -nodeName "%s"]' % (server['Server'], server['Node'])
try:
if not is_8_0_0(server['Node']):
result = AdminTask.stopMiddlewareServer(params)
print "Server stop result: ", result;
else:
print "Falling back to 8.0.0 mode"
result = stopApplicationServer8_0_0(server['Node'], server['Server'])
print "Server stop result: ", result
except:
forwardException(getExceptionMsg())
logSummary("Failed to stop server %s on node %s" % (server['Server'], server['Node']))
sys.exit(1)
stopResults = []
# now we're checking that servers are stopped
stoppedServers = 0
for i in range(iterationsCount):
for server in parsedServerList:
        if 'State' in server and server['State'] == okServerStatus:
continue
serverStatus = ''
try:
serverStatus = showServerStatus(server['Node'], server['Server'])
except:
logSummary("Failed to check state of server %s on node %s" % (server['Server'], server['Node']))
forwardException(getExceptionMsg())
sys.exit(1)
        # record the latest observed state for the final summary
server['State'] = serverStatus
if serverStatus == okServerStatus:
stoppedServers += 1
print serverStatus
if stoppedServers >= len(parsedServerList):
break
time.sleep(sleepTime)
# handle procedure results
print "Procedure result:\n"
for server in parsedServerList:
logSummary("Node: %s, Server: %s, State: %s" % (server['Node'], server['Server'], server['State']))
if stoppedServers != len(parsedServerList):
logError("Some servers are failed to stop")
sys.exit(1)
| {
"repo_name": "electric-cloud/EC-WebSphere",
"path": "src/main/resources/project/wsadmin_scripts/Server/stop_application_servers.py",
"copies": "1",
"size": "3371",
"license": "apache-2.0",
"hash": 3906159006067610600,
"line_mean": 32.0490196078,
"line_max": 109,
"alpha_frac": 0.6496588549,
"autogenerated": false,
"ratio": 3.8658256880733943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9945257215796133,
"avg_score": 0.014045465435452225,
"num_lines": 102
} |