text (string, lengths 0–1.05M) | meta (dict)
---|---
# $$\ $$\ $$$$$$$$\ $$\ $$\
# $$ | \__| \__$$ __| $$ | $$ |
# $$ | $$\ $$$$$$$\ $$ | $$$$$$\ $$$$$$$\ $$ | $$$$$$\
# $$ | $$ |$$ __$$\ $$ | \____$$\ $$ __$$\ $$ |$$ __$$\
# $$ | $$ |$$ | $$ |$$ | $$$$$$$ |$$ | $$ |$$ |$$$$$$$$ |
# $$ | $$ |$$ | $$ |$$ |$$ __$$ |$$ | $$ |$$ |$$ ____|
# $$$$$$$$\ $$ |$$ | $$ |$$ |\$$$$$$$ |$$$$$$$ |$$ |\$$$$$$$\
# \________|\__|\__| \__|\__| \_______|\_______/ \__| \_______|
# Simple and quick implementation of Lin tables for indexing Sz spin states
from __future__ import division, print_function
import numpy as np
from itertools import permutations # necessary in LinTable
class Table(object):
    """Lin tables for indexing the Sz-conserving many-body spin states
    of n sites with nu spin-up and nd spin-down particles."""
    def __init__(self, n, nu, nd):
        """
        Parameters
        ----------
        n : int
            number of sites
        nu : int
            number of spin-up particles
        nd : int
            number of spin-down particles
        """
        self.nu = nu
        self.nd = nd
        self.n = n
        if nu > n or nd > n:
            # Unphysical filling: fall back to sentinel single-entry tables.
            # BUGFIX: the spin-up tuple was assigned to self.Jdu (a typo),
            # which left self.Juv undefined and broke the Js property.
            self.Juv, self.Nu, self.basisu = (['1'], 1, [])
            self.Jdv, self.Nd, self.basisd = (['1'], 1, [])
        else:
            self.Juv, self.Nu, self.basisu = states(self.n, self.nu)
            self.Jdv, self.Nd, self.basisd = states(self.n, self.nd)
    @property
    def Js(self):
        """J indices for each spin species."""
        return {'u': self.Juv, 'd': self.Jdv}
    @property
    def Ns(self):
        """Number of basis states (Nu, Nd) for each spin species."""
        return (self.Nu, self.Nd)
    @property
    def ns(self):
        """Particle numbers (nu, nd)."""
        return (self.nu, self.nd)
    @property
    def ne(self):
        """Total number of particles."""
        return self.nu + self.nd
def states(n, nu):
    """
    Build every many-body spin configuration of nu particles on n sites.

    Parameters
    ----------
    n : int
        number of sites
    nu : int
        number of particles of one spin species

    Returns
    -------
    tuple
        (Jv, N, states): decimal indices, state count, and the binary
        occupation matrix (one state per row)
    """
    occupation = [0] * (n - nu) + [1] * nu
    basis = np.array(unique_permutations(occupation), dtype=int)
    indices = bi2de(basis)
    return (indices, basis.shape[0], basis)
def state2index(states, Juv, Jdv=None):
    """
    Map many-body states to their Lin-table indices.

    Parameters
    ----------
    states : ndarray
        states to index (rows of binary occupations); in the paired case
        each row holds the spin-up pattern followed by the spin-down one
    Juv : ndarray
        indexes of the spin-up subspace
    Jdv : ndarray, optional
        indexes of the spin-down subspace

    Returns
    -------
    ndarray
        combined indices Jd*Nu + Ju (or just Ju when Jdv is None)
    """
    Nu = Juv.shape[0]
    Ju = {J: i for i, J in enumerate(Juv)}
    if Jdv is None:
        if len(states.shape) < 2:
            states = np.array([states])
        Js = np.array([Ju[i] for i in bi2de(states)])
    else:
        Jd = {J: i for i, J in enumerate(Jdv)}
        # BUGFIX: integer division -- true division (active via
        # __future__ import) produced a float slice index (TypeError)
        n = states.shape[1] // 2
        # BUGFIX: 0-based slices -- the original 1:n / n+1: looks like a
        # MATLAB-style 1-indexed port and dropped the first column of
        # each spin pattern
        Ius = bi2de(states[:, :n])
        Ids = bi2de(states[:, n:])
        Js = np.array([Jd[i] for i in Ids])*Nu + np.array([Ju[i] for i in Ius])
    return Js
def index2state(Is, n, Juv, Jdv=None):
    """
    Recover the binary state(s) belonging to the given combined indices.

    Parameters
    ----------
    Is : ndarray
        list of indices
    n : int
        number of sites
    Juv : ndarray
        Lin table of spin-up states
    Jdv : ndarray, optional
        Lin table of spin-down states

    Returns
    -------
    ndarray or tuple
        spin-up states, or (spin-up, spin-down) states when Jdv is given
    """
    Nu = Juv.shape[0]
    # combined index decomposes as Id*Nu + Iu
    Ius = np.mod(Is, Nu)
    states_up = de2bi(Juv[Ius], n)
    if Jdv is None:
        return states_up
    Ids = np.floor(Is / Nu).astype(int)
    states_down = de2bi(Jdv[Ids], n)
    return (states_up, states_down)
def unique_permutations(elements):
    """
    Return every distinct permutation of a list, as a list of lists.

    Parameters
    ----------
    elements : list
        a list containing the elements
    """
    if not elements:
        return []
    distinct = list(set(elements))
    # one element, or all elements equal: the input is its only permutation
    if len(elements) == 1 or len(distinct) == 1:
        return [elements]
    # all elements distinct: itertools covers it directly
    if len(distinct) == len(elements):
        return [list(p) for p in permutations(elements)]
    # mixed case: recurse on the remainder after removing one instance
    # of each distinct element in turn
    result = []
    for head in distinct:
        remainder = list(elements)
        del remainder[remainder.index(head)]
        result.extend([head] + tail for tail in unique_permutations(remainder))
    return result
def bi2de(binaries):
    """
    Convert binary digit rows (most significant bit first) to decimals.

    Parameters
    ----------
    binaries : ndarray
        a single binary vector, or a matrix with one binary number per row
    """
    shape = binaries.shape
    # width of each binary number: column count for a matrix,
    # total length for a single vector
    width = shape[1] if len(shape) > 1 else shape[0]
    weights = np.power(2, np.arange(width - 1, -1, -1))
    return np.dot(binaries, weights)
def de2bi(decimals, n=None):
    """
    Convert decimal numbers to binary digit rows (MSB first).

    Parameters
    ----------
    decimals : ndarray
        vector of decimals
    n : int, optional
        number of binary digits; widened if too small for the input

    Returns
    -------
    ndarray
        one row of n binary digits per input decimal
    """
    decimals = np.array(decimals)
    # BUGFIX: minimal digit count via bit_length instead of
    # ceil(log2(max)), which under-counted for exact powers of two
    # (e.g. 4 needs 3 digits, not 2) and emitted a warning for max == 0
    # that the old 'except RuntimeWarning' never actually caught.
    nd = max(int(np.max(decimals)).bit_length(), 1)
    if n is None or n < nd:
        n = nd
    # floor(d * 2**k) mod 2 extracts the digits, most significant first
    return np.remainder(np.floor(np.outer(decimals, np.power(2., np.arange(1-n, 1)))), 2).astype(int)
| {
"repo_name": "georglind/humo",
"path": "humo/xactlintable.py",
"copies": "1",
"size": "5041",
"license": "mit",
"hash": -1903651756788467700,
"line_mean": 23.1196172249,
"line_max": 100,
"alpha_frac": 0.475104146,
"autogenerated": false,
"ratio": 3.180441640378549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4155545786378549,
"avg_score": null,
"num_lines": null
} |
# $$\ $$\ $$\ $$\
# $$ | $$ | $$ | $$ |
# $$ | $$ |$$\ $$\ $$$$$$$\ $$ | $$\ $$$$$$\ $$ |
# $$$$$$$$ |$$ | $$ |$$ _____|$$ | $$ |$$ __$$\ $$ |
# $$ __$$ |$$ | $$ |$$ / $$$$$$ / $$$$$$$$ |$$ |
# $$ | $$ |$$ | $$ |$$ | $$ _$$< $$ ____|$$ |
# $$ | $$ |\$$$$$$ |\$$$$$$$\ $$ | \$$\ \$$$$$$$\ $$ |
# \__| \__| \______/ \_______|\__| \__| \_______|\__|
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
def transmission(energies, H, GL, GR, RealSigLR=None):
    r"""
    Huckel transmission from the Landauer formula
    T(E) = Tr(G Gamma_L G^\dagger Gamma_R)

    Parameters
    ----------
    energies: ndarray
        list of energies
    H : ndarray
        Huckel Hamiltonian
    GL : ndarray
        vector of couplings to left lead
    GR : ndarray
        vector of couplings to right lead
    RealSigLR : ndarray
        vector containing the real part of the self-energy
    """
    # left and right coupling matrices
    GamL = -np.diag(GL)
    GamR = -np.diag(GR)
    # lead self-energies Sigma_L + Sigma_R, assumed purely imaginary
    # unless a real part is supplied
    SigLR = -1j/2*(GamL + GamR)
    if RealSigLR is not None:
        SigLR = SigLR + RealSigLR
    identity = np.eye(H.shape[0])
    T = np.zeros(len(energies), dtype=np.float64)
    for i, E in enumerate(energies):
        try:
            G = np.linalg.inv(E*identity - H - SigLR)
        except np.linalg.LinAlgError:
            # leave T[i] at zero where the inverse does not exist
            print("Green's function not defined at E={0}".format(E))
            continue
        T[i] = np.abs(np.trace(GamL.dot(G).dot(GamR).dot(np.conjugate(G.T))))
    return T
def seebeck_coefficient(Es, H, GL, GR, T=0):
    """
    Seebeck coefficient from the Mott relation, S ~ T * d ln(T(E))/dE,
    in units of V/K.

    Parameters
    ----------
    Es : ndarray
        energy grid, assumed uniformly spaced (only Es[1]-Es[0] is used)
    H : ndarray
        Huckel Hamiltonian
    GL, GR : ndarray
        coupling vectors to the left/right leads
    T : float
        temperature; presumably in Kelvin (11604 K per eV) -- TODO confirm

    Returns
    -------
    ndarray
        Seebeck coefficients on the midpoints of Es (length len(Es)-1)
    """
    Tn = transmission(Es, H, GL, GR)
    # the two 1/11604 factors convert temperature and energy units (k_B)
    S = np.pi**2/3*1/(11604)*T/(11604)*np.diff(np.log(Tn))/(Es[1] - Es[0])
    return S
def coupling(n, m, eta):
    """
    Build a length-n coupling vector with strength eta at site m.
    """
    vec = np.zeros(n)
    vec[m] = eta
    return vec
| {
"repo_name": "georglind/humo",
"path": "humo/huckel.py",
"copies": "1",
"size": "2093",
"license": "mit",
"hash": -788217211057895300,
"line_mean": 28.4788732394,
"line_max": 88,
"alpha_frac": 0.4567606307,
"autogenerated": false,
"ratio": 2.757575757575758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37143363882757574,
"avg_score": null,
"num_lines": null
} |
from ctypes import *
import os
import numpy
import pkg_resources
class SWaligner(object):
    """ctypes wrapper around a compiled Smith-Waterman library (sw.so).

    NOTE(review): Python 2 code (print statement, xrange) -- will not run
    under Python 3 without porting.
    """
    def __init__(self, soFile=None):
        # setup.py should put sw.so in the following path.
        if soFile is None:
            self.SW_DLL_PATH = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "sw.so"
        else:
            self.SW_DLL_PATH = os.path.abspath( soFile )
        self._dll = CDLL(self.SW_DLL_PATH)
        # the DP matrix is allocated once in C and reused by every call
        self.dpMat = self._dll.allocate_dp_mat()
    def score(self, tSeq, qSeq):
        # Align qSeq against tSeq; also prints the DP matrix (debug aid)
        score = self._dll.compute_align_score(self.dpMat, tSeq, qSeq)
        self._dll.print_dp_mat(self.dpMat, tSeq, qSeq)
        print "Max: %s" % score
        return score
    def makeScorer(self, targets):
        # Build the C arrays once, so the returned closure can score a
        # query against every target with a single library call.
        ScoreType = c_int * len(targets)
        scores = ScoreType()
        for i in range(0, len(scores)):
            scores[i] = 0
        TargetType = c_char_p * len(targets)
        targetSeqs = TargetType()
        for i in range(0, len(targetSeqs)):
            targetSeqs[i] = targets[i]
        targetLen = len(targets)
        def scorer(query):
            # empty/None query: nothing to align, return all zeros
            if not query:
                return numpy.zeros(len(targets))
            self._dll.compute_align_scores(scores,
                                           targetLen,
                                           self.dpMat,
                                           query,
                                           targetSeqs)
            return numpy.array([scores[i] for i in xrange(0, len(scores))])
        return scorer
| {
"repo_name": "bnbowman/BarcodeAnalysis",
"path": "BarcodeAnalysis/SWaligner.py",
"copies": "1",
"size": "3293",
"license": "mit",
"hash": 4638747237572512000,
"line_mean": 43.5,
"line_max": 97,
"alpha_frac": 0.6155481324,
"autogenerated": false,
"ratio": 4.265544041450777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5381092173850777,
"avg_score": null,
"num_lines": null
} |
import logging
from pbcore.io import BasH5Reader, BaxH5Reader
from pbcore.io.FastaIO import *
import BarcodeAnalysis.SWaligner as Aligner
import numpy as n
from pbcore.io.BarcodeH5Reader import LabeledZmw, \
BARCODE_DELIMITER
__RC_MAP__ = dict(zip('ACGTacgt-N','TGCAtgca-N'))
class BarcodeScorer(object):
    """Scores ZMW reads against a set of barcodes by Smith-Waterman
    aligning the sequence flanking each detected adapter.

    NOTE(review): Python 2 code (print statements, xrange, map/reduce
    idioms) -- will not run under Python 3 without porting.
    """
    def __init__(self, basH5, barcodeFasta,
                 adapterSidePad = 4, insertSidePad = 4,
                 scoreMode = 'paired', maxHits = 10,
                 scoreFirst = False, startTimeCutoff = 1,
                 minScore = 20,
                 soFile=None):
        """A BarcodeScorer object scores ZMWs and produces summaries
        of the scores. Various parameters control the behavior of the
        object, specifically the padding allows the user to add a
        little extra on each side of the adapter find for safety. The
        most relevant parameter is the scoreMode which dictates how
        the barcodes are scored, either paired or symmetric."""
        self.basH5 = basH5
        self.barcodeFasta = list(barcodeFasta)
        self.aligner = Aligner.SWaligner(soFile)
        # all barcodes must share one length; enforced below
        self.barcodeLength = n.unique(map(lambda x : len(x.sequence),
                                          self.barcodeFasta))
        if len(self.barcodeLength) > 1:
            raise Exception("Currently, all barcodes must be the same length.")
        else:
            self.barcodeLength = int(self.barcodeLength)
        self.adapterSeq = "ATCTCTCTCTTTTCCTCCTCCTCCGTTGTTGTTGTTGAGAGAGAT"
        self.adapterSeqRc = self._rc("ATCTCTCTCTTTTCCTCCTCCTCCGTTGTTGTTGTTGAGAGAGAT")
        # each entry is (barcode, reverse-complement of barcode)
        self.barcodeSeqs = [(barcode.sequence.upper(),
                             self._rc(barcode.sequence.upper()))
                            for barcode in self.barcodeFasta]
        self.adapterSidePad = adapterSidePad
        self.insertSidePad = insertSidePad
        self.maxHits = maxHits
        self.minScore = minScore
        if scoreMode not in ['symmetric', 'paired']:
            raise Exception("scoreMode must either be symmetric or paired")
        self._scoreMode = scoreMode
        self.scoreFirst = scoreFirst
        self.startTimeCutoff = startTimeCutoff
        # alternate fwd/rc orientation for even/odd barcode positions
        self.threePrimeSeqs = [(x[0] if (i%2)==0 else x[1]) for i,x in enumerate(self.barcodeSeqs)]
        self.fivePrimeSeqs = [(x[1] if (i%2)==0 else x[0]) for i,x in enumerate(self.barcodeSeqs)]
        self.fivePrimeSeqsRc = [self._rc(s) for s in self.fivePrimeSeqs]
        # barcode-adapter-barcode reference sequences for whole-adapter scoring
        def fwdBcAdp( seq_pair ):
            return "{0}{1}{2}".format(seq_pair[1], self.adapterSeqRc, seq_pair[0])
        def revBcAdp( seq_pair ):
            return "{0}{1}{2}".format(seq_pair[0], self.adapterSeqRc, seq_pair[1])
        self.adapterBcSeqs = [fwdBcAdp(p) if (i%2)==0 else revBcAdp(p) for i,p in enumerate(self.barcodeSeqs)]
        # pre-built scorer closures (one SW call scores all barcodes)
        self.threePrimeScorer = self.aligner.makeScorer(self.threePrimeSeqs)
        self.fivePrimeScorer = self.aligner.makeScorer(self.fivePrimeSeqs)
        self.fivePrimeScorerRc = self.aligner.makeScorer(self.fivePrimeSeqsRc)
        self.adapterScorer = self.aligner.makeScorer(self.adapterBcSeqs)
        self.forwardScorer = self.aligner.makeScorer([x[0] for x in self.barcodeSeqs])
        self.reverseScorer = self.aligner.makeScorer([x[1] for x in self.barcodeSeqs])
        logging.debug(("Constructed BarcodeScorer with scoreMode: %s," + \
                       "adapterSidePad: %d, insertSidePad: %d, and scoreFirst: %r") \
                      % (scoreMode, adapterSidePad, insertSidePad, scoreFirst))
    @property
    def movieName(self):
        return self.basH5.movieName
    def makeBCLabel(self, s1, s2):
        # join two barcode names with the standard delimiter
        return BARCODE_DELIMITER.join((s1, s2))
    @property
    def barcodeLabels(self):
        """The barcode labels are function of the barcodeNames and the
        scoreMode, they represent the user-visible names."""
        if self.scoreMode == 'paired':
            return n.array([self.makeBCLabel(self.barcodeFasta[i].name,
                                             self.barcodeFasta[i+1].name) for i
                            in xrange(0, len(self.barcodeSeqs), 2)])
        else:
            return n.array([self.makeBCLabel(x.name, x.name) for x in self.barcodeFasta])
    @property
    def barcodeNames(self):
        """The barcode names are the FASTA names"""
        return n.array([x.name for x in self.barcodeFasta])
    @property
    def barcodeNames2(self):
        # same as barcodeNames but as a plain list (used for .index lookups)
        return [x.name for x in self.barcodeFasta]
    @property
    def scoreMode(self):
        return self._scoreMode
    def _rc(self, s):
        """Reverse-complement s using __RC_MAP__."""
        return "".join([__RC_MAP__[c] for c in s[::-1]])
    def _adapterSeqs(self, zmw):
        """Extract each adapter region plus barcode-sized flanks from the
        ZMW read, up to maxHits adapters; None where the read bounds are
        exceeded."""
        def fromRange(rStart, rEnd):
            try:
                adpSeq = zmw.read(rStart - (self.barcodeLength + self.insertSidePad),
                                  rEnd + self.barcodeLength + self.insertSidePad).basecalls()
            except IndexError:
                return None
            return adpSeq
        adapterRegions = zmw.adapterRegions
        if len(adapterRegions) > self.maxHits:
            adapterRegions = adapterRegions[0:self.maxHits]
        seqs = [fromRange(start, end) for (start, end) in adapterRegions]
        return seqs
    def _flankingSeqs(self, zmw, trim=False):
        """Return ((left, right) flanking-sequence pairs per adapter,
        scoredFirst flag); either side may be None when out of bounds."""
        def fromRange(rStart, rEnd):
            try:
                qSeqLeft = zmw.read(rStart - (self.barcodeLength + self.insertSidePad),
                                    rStart + self.adapterSidePad).basecalls()
            except IndexError:
                qSeqLeft = None
            try:
                qSeqRight = zmw.read(rEnd - self.adapterSidePad,
                                     rEnd + self.barcodeLength +
                                     self.insertSidePad).basecalls()
            except IndexError:
                qSeqRight = None
            return (qSeqLeft, qSeqRight)
        # If requested, trim to only adapters entirely in the HQ region
        adapterRegions = zmw.adapterRegions
        if trim:
            if adapterRegions[0][0] < zmw.hqRegion[0]:
                adapterRegions = adapterRegions[1:]
            if adapterRegions[-1][1] > zmw.hqRegion[1]:
                adapterRegions = adapterRegions[:-1]
        # If we still have more than the maximum allowed hits, trim
        if len(adapterRegions) > self.maxHits:
            adapterRegions = adapterRegions[0:self.maxHits]
        seqs = [fromRange(start, end) for (start, end) in adapterRegions]
        # We only score the first barcode if we don't find any adapters
        # *and* the start time is less than the threshold.
        scoredFirst = False
        if self.scoreFirst and not len(seqs):
            s = zmw.zmwMetric('HQRegionStartTime')
            e = zmw.zmwMetric('HQRegionEndTime')
            # s<e => has HQ.
            if s < e and s <= self.startTimeCutoff:
                l = self.barcodeLength + self.insertSidePad
                l = l if zmw.hqRegion[1] > l else zmw.hqRegion[1]
                try:
                    bc = zmw.read(0, l).basecalls()
                    if len(bc) >= self.barcodeLength:
                        seqs.insert(0, (bc, None))
                        scoredFirst = True
                except IndexError:
                    pass
        return (seqs, scoredFirst)
    def testAligner(self, holeNumbers):
        """Debug helper: print raw SW scores of every flank against every
        barcode orientation for the given hole numbers."""
        for holeNumber in holeNumbers:
            print holeNumber
            zmw = self.basH5[holeNumber]
            print zmw
            adapters, _ = self._flankingSeqs(zmw)
            for left, right in adapters:
                for fwd, rev in self.barcodeSeqs:
                    print len(fwd), len(rev)
                    if left:
                        print "Left, Fwd"
                        self.aligner.score(left, fwd)
                        print "Left, Rev"
                        self.aligner.score(left, rev)
                    if right:
                        print "Right, Fwd"
                        self.aligner.score(right, fwd)
                        print "Right, Rev"
                        self.aligner.score(right, rev)
    def scoreZmw(self, zmw):
        """Score a ZMW with the forward/reverse whole-barcode scorers;
        returns (summed barcode scores, per-adapter score vectors)."""
        adapters, scoredFirst = self._flankingSeqs(zmw)
        adapterScores = [[]]*len(adapters)
        barcodeScores = n.zeros(len(self.barcodeSeqs))
        for i,adapter in enumerate(adapters):
            fscores = self.forwardScorer(adapter[0])
            rscores = self.reverseScorer(adapter[0])
            ffscores = self.forwardScorer(adapter[1])
            rrscores = self.reverseScorer(adapter[1])
            scored = 2.0 if adapter[0] and adapter[1] \
                else 1.0 if adapter[0] or adapter[1] \
                else 0
            # An adapter score is the average barcode score for
            # each barcode -- that way, you can compare across
            # adapters even if the different adapters have
            # different numbers of flanking sequence.
            if scored == 0:
                adapterScores[i] = barcodeScores
            else:
                adapterScores[i] = n.maximum((fscores + rrscores)/scored,
                                             (rscores + ffscores)/scored)
        barcodeScores = reduce(lambda x, y: x + y, adapterScores) if adapterScores \
            else n.zeros(len(self.barcodeSeqs))
        return (barcodeScores, adapterScores)
    def scoreZmw2(self, zmw):
        """Variant of scoreZmw using the 5'-RC and 3' scorers."""
        adapters, scoredFirst = self._flankingSeqs(zmw)
        adapterScores = [[]]*len(adapters)
        barcodeScores = n.zeros(len(self.barcodeSeqs))
        for i,adapter in enumerate(adapters):
            fscores = self.fivePrimeScorerRc(adapter[0])
            rscores = self.threePrimeScorer(adapter[0])
            ffscores = self.fivePrimeScorerRc(adapter[1])
            rrscores = self.threePrimeScorer(adapter[1])
            scored = 2.0 if adapter[0] and adapter[1] \
                else 1.0 if adapter[0] or adapter[1] \
                else 0
            # An adapter score is the average barcode score for
            # each barcode -- that way, you can compare across
            # adapters even if the different adapters have
            # different numbers of flanking sequence.
            if scored == 0:
                adapterScores[i] = barcodeScores
            else:
                adapterScores[i] = n.maximum((fscores + rrscores)/scored,
                                             (rscores + ffscores)/scored)
        barcodeScores = reduce(lambda x, y: x + y, adapterScores) if adapterScores \
            else n.zeros(len(self.barcodeSeqs))
        return (barcodeScores, adapterScores)
    def scoreZmw3(self, zmw):
        """Variant of scoreZmw that scores 5' left flanks and 3' right
        flanks separately, zeroing scores below minScore."""
        adapters, scoredFirst = self._flankingSeqs(zmw)
        adapterScores = [[]]*len(adapters)
        barcodeScores = n.zeros(len(self.barcodeSeqs))
        for i,adapter in enumerate(adapters):
            fscores = self.fivePrimeScorer(adapter[0])
            tscores = self.threePrimeScorer(adapter[1])
            filteredF = n.array([(s if s >= self.minScore else 0) for s in fscores])
            filteredT = n.array([(s if s >= self.minScore else 0) for s in tscores])
            #filteredCounts = n.array([(2.0 if filteredF[i] and filteredT[i] else 1.0) for i in range(len(fscores))])
            scored = 2.0 if adapter[0] and adapter[1] else \
                1.0 if adapter[0] or adapter[1] else 0
            # An adapter score is the average barcode score for
            # each barcode -- that way, you can compare across
            # adapters even if the different adapters have
            # different numbers of flanking sequence.
            if scored == 0:
                adapterScores[i] = barcodeScores
            else:
                adapterScores[i] = (filteredF + filteredT)/scored
                #adapterScores[i] = (fscores + tscores)/scored
        barcodeScores = reduce(lambda x, y: x + y, adapterScores) if adapterScores \
            else n.zeros(len(self.barcodeSeqs))
        return (barcodeScores, adapterScores)
    def scoreZmwRc(self, zmw, trim=False):
        """Variant of scoreZmw3 that reverse-complements the left flank
        before scoring (no minScore filter)."""
        adapters, scoredFirst = self._flankingSeqs(zmw, trim)
        adapters2 = [((a[0], a[1]) if a[0] is None else (self._rc(a[0]), a[1])) for a in adapters]
        adapterScores = [[]]*len(adapters)
        barcodeScores = n.zeros(len(self.barcodeSeqs))
        for i,adapter in enumerate(adapters2):
            fscores = self.fivePrimeScorerRc(adapter[0])
            tscores = self.threePrimeScorer(adapter[1])
            scored = 2.0 if adapter[0] and adapter[1] \
                else 1.0 if adapter[0] or adapter[1] \
                else 0
            # An adapter score is the average barcode score for
            # each barcode -- that way, you can compare across
            # adapters even if the different adapters have
            # different numbers of flanking sequence.
            if scored == 0:
                adapterScores[i] = barcodeScores
            else:
                #adapterScores[i] = (filteredF + filteredT)/scored
                adapterScores[i] = (fscores + tscores)/scored
        barcodeScores = reduce(lambda x, y: x + y, adapterScores) if adapterScores \
            else n.zeros(len(self.barcodeSeqs))
        return (barcodeScores, adapterScores)
    def scoreZmwRc2(self, zmw):
        """Generator yielding raw (left, right) 3' score vectors per
        adapter, with the left flank reverse-complemented."""
        adapters, scoredFirst = self._flankingSeqs(zmw)
        adapters2 = [((a[0], a[1]) if a[0] is None else (self._rc(a[0]), a[1])) for a in adapters]
        for i,adapter in enumerate(adapters2):
            fscores = self.threePrimeScorer(adapter[0])
            tscores = self.threePrimeScorer(adapter[1])
            yield (fscores, tscores)
    def scoreZmwAdps(self, zmw):
        """Score whole adapter+barcode regions against the composite
        barcode-adapter-barcode references."""
        adapters = self._adapterSeqs(zmw)
        perAdapterScores = [[]]*len(adapters)
        perBarcodeScores = n.zeros(len(self.barcodeSeqs))
        for i,adapter in enumerate(adapters):
            perAdapterScores[i] = self.adapterScorer(adapter)
        perBarcodeScores = reduce(lambda x, y: x + y, perAdapterScores) \
            if perAdapterScores \
            else n.zeros(len(self.barcodeSeqs))
        return (perBarcodeScores, perAdapterScores)
    def scoreSelectedAdapters(self, zmw, selectedAdp, selectedBc):
        """Debug helper: print SW scores of selected adapters against
        selected barcodes (both orientations)."""
        adapters, scoredFirst = self._flankingSeqs(zmw)
        assert len(adapters) == len(selectedAdp)
        selectedAdapters = [adapters[i] for i,v in enumerate(selectedAdp) if v == 1]
        selectedBcPos = [self.barcodeNames2.index(bc) for bc in selectedBc]
        selectedBcSeqs = [self.barcodeSeqs[i] for i in selectedBcPos]
        for i, adps in enumerate(selectedAdapters):
            fwdAdp, revAdp = adps
            print "FORWARD"
            for j, bc in enumerate(selectedBc):
                fwdBc, revBc = selectedBcSeqs[j]
                print "Adp #{0} - BC {1} - FwdAdp FwdBc".format(i+1, bc)
                self.aligner.score(fwdAdp, fwdBc)
                print "Adp #{0} - BC {1} - FwdAdp RevBc".format(i+1, bc)
                self.aligner.score(fwdAdp, revBc)
            print "REVERSE"
            for j, bc in enumerate(selectedBc):
                fwdBc, revBc = selectedBcSeqs[j]
                print "Adp #{0} - BC {1} - RevAdp FwdBc".format(i+1, bc)
                self.aligner.score(revAdp, fwdBc)
                print "Adp #{0} - BC {1} - revAdp RevBc".format(i+1, bc)
                self.aligner.score(revAdp, revBc)
        print "END\n"
    def scoreSelectedAdaptersRc(self, zmw, selectedAdp, selectedBc):
        """Debug helper: like scoreSelectedAdapters, but with the left
        flank reverse-complemented and orientation-specific barcodes."""
        adapters, scoredFirst = self._flankingSeqs(zmw)
        adapters2 = [((None, a[1]) if a[0] is None else (self._rc(a[0]), a[1])) for a in adapters]
        assert len(adapters2) == len(selectedAdp)
        selectedAdapters = [adapters2[i] for i,v in enumerate(selectedAdp) if v == 1]
        selectedBcPos = [self.barcodeNames2.index(bc) for bc in selectedBc]
        selected5pBcSeqs = [self.fivePrimeSeqsRc[i] for i in selectedBcPos]
        selected3pBcSeqs = [self.threePrimeSeqs[i] for i in selectedBcPos]
        for i, adps in enumerate(selectedAdapters):
            fwdAdp, revAdp = adps
            print "FORWARD"
            for j, bc in enumerate(selectedBc):
                fwdBc = selected5pBcSeqs[j]
                print "Adp #{0} - BC {1} - FwdAdp FwdBc".format(i+1, bc)
                self.aligner.score(fwdAdp, fwdBc)
            print "REVERSE"
            for j, bc in enumerate(selectedBc):
                revBc = selected3pBcSeqs[j]
                print "Adp #{0} - BC {1} - RevAdp RevBc".format(i+1, bc)
                self.aligner.score(revAdp, revBc)
        print "END\n"
    def scoreSelectedAdapterRegions(self, zmw, selectedAdp, selectedBc):
        """Debug helper: print SW scores of whole adapter regions against
        the composite adapter-barcode references."""
        adapters = self._adapterSeqs(zmw)
        assert len(adapters) == len(selectedAdp)
        selectedAdapters = [adapters[i] for i,v in enumerate(selectedAdp) if v == 1]
        selectedBcPos = [self.barcodeNames2.index(bc) for bc in selectedBc]
        selectedAdpBcSeqs = [self.adapterBcSeqs[i] for i in selectedBcPos]
        print zmw.zmwName
        for i, adp in enumerate(selectedAdapters):
            for j, bcId in enumerate(selectedBc):
                adpBc = selectedAdpBcSeqs[j]
                print "Adp #{0} - BC {1} - FwdAdp FwdBc".format(i+1, bcId)
                self.aligner.score(adp, adpBc)
        print "END\n"
    def chooseSymmetric(self, o):
        """Build a LabeledZmw from a score tuple by ranking barcodes
        directly (symmetric mode)."""
        p = n.argsort(-o[2])
        return LabeledZmw(o[0], o[1], p[0], o[2][p[0]], p[1], o[2][p[1]], o[3])
    def choosePaired(self, o):
        """Build a LabeledZmw by ranking barcode *pairs* (paired mode)."""
        if o[1] == 1:
            # single adapter: take the better of the two pair members
            s = n.array([max(o[2][i], o[2][i + 1]) for i in \
                         xrange(0, len(self.barcodeSeqs), 2)])
            p = n.argsort(-s)
            s = s[p]
        else:
            # score the pairs by scoring the two alternate
            # ways they could have been put on the molecule. A
            # missed adapter will confuse this computation.
            scores = o[3]
            results = n.zeros(len(self.barcodeSeqs)/2)
            for i in xrange(0, len(self.barcodeSeqs), 2):
                pths = [0,0]
                for j in xrange(0, len(scores)):
                    pths[j % 2] += scores[j][i]
                    pths[1 - j % 2] += scores[j][i + 1]
                results[i/2] = max(pths)
            p = n.argsort(-results)
            s = results[p]
        return LabeledZmw(o[0], o[1], p[0], s[0], p[1], s[1], o[3])
    def labelZmws(self, holeNumbers):
        """Return a list of LabeledZmws for input holeNumbers"""
        # o here is the record immediately above.
        if self.scoreMode == 'symmetric':
            choose = self.chooseSymmetric
        elif self.scoreMode == 'paired':
            choose = self.choosePaired
        else:
            raise Exception("Unsupported scoring mode in BarcodeLabeler.py")
        scored = [self.scoreZmw(self.basH5[zmw]) for zmw in holeNumbers]
        return [choose(scoreTup) for scoreTup in scored if scoreTup[1]]
| {
"repo_name": "bnbowman/BarcodeAnalysis",
"path": "BarcodeAnalysis/BarcodeScorer.py",
"copies": "1",
"size": "20768",
"license": "mit",
"hash": 3103317154239369000,
"line_mean": 43.5665236052,
"line_max": 117,
"alpha_frac": 0.5850346687,
"autogenerated": false,
"ratio": 3.7026207880192548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4787655456719255,
"avg_score": null,
"num_lines": null
} |
    def __init__(self, script, mode=None):
        """Create a runner for a DagCon script, optionally with a mode
        ('r' or 'd' for gcon.py); settings are validated immediately."""
        self.script = script
        self.mode = mode
        self.validateSettings()
def validateSettings(self):
# Check the values of the specified script
try:
assert self.script in SCRIPT_CHOICES
except:
raise ValueError("Script is not a recognized DagCon script!")
# If it's gcon, check that a valid mode was specified
if self.script == 'gcon.py':
try:
assert self.mode in MODE_CHOICES
except:
raise ValueError("Gcon.py runners must specify mode 'r' or 'd'")
# Finally, if the script and options pass, find the absolute paths
self.executable = which( self.script )
####################
# Instance Methods #
####################
def runGcon(self, inputFile, outputFile, refFile=None, name=None):
if outputFile is None:
outputFile = self.getOutputFileName( inputFile )
if name is None:
path, filename = os.path.split( inputFile )
filename, ext = os.path.splitext( filename )
name = filename + '_consensus'
if self.mode == 'r':
assert refFile is not None
p = subprocess.Popen( [self.executable,
self.mode,
inputFile,
refFile,
'--cname', name,
'-o', outputFile] )
p.wait()
elif self.mode == 'd':
p = subprocess.Popen( [self.executable,
self.mode,
inputFile,
'--cname', name,
'-o', outputFile] )
p.wait()
return outputFile
def getOutputFile(self, inputFile):
path, filename = os.path.split( inputFile )
root, ext = os.path.splitext( filename )
outputFile = root + '_consensus.fa'
return os.path.join( path, outputFile )
    def __call__(self, inputFile, refFile=None):
        """Run the configured script on inputFile, skipping the run when
        the consensus output already exists; returns the output path
        (implicitly None for scripts other than gcon.py)."""
        outputFile = self.getOutputFile( inputFile )
        if os.path.exists( outputFile ):
            return outputFile
        elif self.script == 'gcon.py':
            return self.runGcon( inputFile, outputFile, refFile )
| {
"repo_name": "lufuhao/PB_rDnaTools",
"path": "src/pbrdna/resequence/DagConTools.py",
"copies": "2",
"size": "4518",
"license": "bsd-3-clause",
"hash": 6348441796889369000,
"line_mean": 40.8333333333,
"line_max": 83,
"alpha_frac": 0.5825586543,
"autogenerated": false,
"ratio": 4.643371017471737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6225929671771737,
"avg_score": null,
"num_lines": null
} |
    def __init__(self, input_file=None, output_file=None):
        """Initialize from CLI arguments (when no input_file is given)
        or from a direct programmatic call, then validate and build the
        BasH5 readers."""
        if input_file is None:
            self.initialize_from_args()
        else:
            self.initialize_from_call(input_file, output_file)
        self.validate_settings()
        self.initialize_readers()
    def initialize_from_args(self):
        """Parse command-line arguments and copy them onto self."""
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument('input_file', metavar='FILE',
                            help="BasH5 or FOFN to extract from")
        parser.add_argument('-o', '--output', default=sys.stdout,
                            help="Specify a file to output the data to")
        # exactly one of --subreads / --CCS must be chosen
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument('--subreads', action='store_true',
                           help="Output sequences from individual subreads")
        group.add_argument('--CCS', action='store_true',
                           help="Output sequences from CCS reads")
        args = parser.parse_args()
        # expose each parsed option as an instance attribute
        self.__dict__.update( vars(args) )
def initialize_from_call(self, input_file, output_file):
self.input_file = input_file
if output_file is None:
self.output = sys.stdout
else:
self.output = output_file
    def validate_settings(self):
        """Require the input to be a BasH5 or FOFN file; raise otherwise."""
        if self.input_file.endswith('.bas.h5') or \
           self.input_file.endswith('.fofn'):
            log.info('Creating a BasH5Extractor for "{0}"'.format(self.input_file))
            log.info('Outputing extracted reads to "{0}"'.format(self.output))
        else:
            raise ValueError('Input files must be either FOFN or BasH5!')
def initialize_readers(self):
self.bash5_readers = []
if self.input_file.endswith('.bas.h5'):
self.add_bash5_reader( self.input_file )
elif self.input_file.endswith('.fofn'):
self.parse_fofn_file( self.input_file )
    def add_bash5_reader(self, bash5_file):
        """Append a BasH5Reader for the given file to self.bash5_readers."""
        filepath = os.path.realpath( bash5_file )
        path, filename = os.path.split( filepath )
        log.info('Creating a BasH5Reader for "{0}"'.format(filename))
        self.bash5_readers.append( BasH5Reader( filepath ) )
def parse_fofn_file(self, fofn_file):
with open(fofn_file, 'r') as handle:
for line in handle:
fofn_entry = line.strip()
if not fofn_entry:
continue
if fofn_entry.endswith('.bas.h5'):
self.add_bash5_reader( fofn_entry )
elif fofn_entry.endswith('.bax.h5'):
self.add_bash5_reader( fofn_entry )
else:
raise ValueError('FOFN must contain only BasH5 and BaxH5 files!')
#################
# Class Methods #
#################
    @classmethod
    def writeCcsFastq(cls, basH5Reader, fastqWriter):
        """Write one Fastq record per CCS read found in basH5Reader."""
        log.info('Writing Fastq CCS reads from "%s"...' % basH5Reader.movieName)
        for zmw in basH5Reader:
            # ZMWs without a CCS read are skipped
            if zmw.ccsRead:
                fastqRecord = FastqRecord(zmw.ccsRead.readName,
                                          zmw.ccsRead.basecalls(),
                                          zmw.ccsRead.QualityValue())
                fastqWriter.writeRecord( fastqRecord )
    @classmethod
    def writeSubreadFastq(cls, basH5Reader, fastqWriter):
        """Write one Fastq record per subread of every ZMW in basH5Reader."""
        log.info('Writing Fastq subreads from "%s"...' % basH5Reader.movieName)
        for zmw in basH5Reader:
            for subread in zmw.subreads():
                fastqRecord = FastqRecord(subread.readName,
                                          subread.basecalls(),
                                          subread.QualityValue())
                fastqWriter.writeRecord( fastqRecord )
####################
# Instance Methods #
####################
    def outputCcsFastq(self):
        """Write CCS reads from every registered reader to self.output."""
        log.info('Parsing Fastq CCS reads from input BAS.H5 files')
        with FastqWriter(self.output) as writer:
            for reader in self.bash5_readers:
                self.writeCcsFastq( reader, writer )
    def outputSubreadFastq(self):
        """Write subreads from every registered reader to self.output."""
        log.info('Parsing Fastq subreads from input BAS.H5 files')
        with FastqWriter(self.output) as writer:
            for reader in self.bash5_readers:
                self.writeSubreadFastq( reader, writer )
    def __call__(self):
        """Dispatch to CCS or subread extraction based on the options."""
        if self.CCS:
            self.outputCcsFastq()
        elif self.subreads:
            self.outputSubreadFastq()
# Script entry point: parse CLI arguments, then run the extraction
if __name__ == '__main__':
    extractor = BasH5Extractor()
    extractor()
| {
"repo_name": "lufuhao/PB_rDnaTools",
"path": "src/pbrdna/io/BasH5IO.py",
"copies": "2",
"size": "6696",
"license": "bsd-3-clause",
"hash": -7708361357265220000,
"line_mean": 40.85,
"line_max": 85,
"alpha_frac": 0.5979689367,
"autogenerated": false,
"ratio": 4.1256931608133085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5723662097513308,
"avg_score": null,
"num_lines": null
} |
    def __init__(self, listFile=None, seqFile=None, distance=None):
        """Initialize from CLI arguments (when listFile/seqFile are not
        both given) or from a direct call, then validate, set up the
        output directory, and index the CCS sequence data."""
        if listFile is None or seqFile is None:
            self.initializeFromArgs()
        else:
            self.initializeFromCall(listFile, seqFile, distance)
        self.validate_settings()
        self.initialize_output()
        self.parse_sequence_data()
    def initializeFromArgs(self):
        """Parse command-line arguments and copy them onto self."""
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument('listFile', metavar='FILE',
                            help="Mothur list file of cluster data")
        parser.add_argument('ccsFile', metavar='FILE',
                            help="Fasta or Fastq file of CCS sequences")
        parser.add_argument('sequenceFile', metavar='FILE',
                            help="BasH5 or FOFN of sequence data")
        parser.add_argument('-d', '--distance', type=float,
                            default=DEFAULT_DIST, metavar='FLOAT',
                            help="Distance at which to cluster sequences")
        parser.add_argument('-f', '--outputFile', default=sys.stdout,
                            help="Specify output file for consensus sequences")
        parser.add_argument('-m', '--minClusterSize', type=int,
                            default=DEFAULT_MIN_CLUSTER, metavar='INT',
                            help='Minimum number of CCS reads to resequence')
        parser.add_argument('-n', '--numProc', type=int,
                            default=1, metavar='INT',
                            help="Number of processors to use")
        parser.add_argument('-o', '--outputDir', default='reseq',
                            help="Specify a directory for intermediate files")
        args = parser.parse_args()
        # expose each parsed option as an instance attribute
        self.__dict__.update( vars(args) )
def initializeFromCall(self, listFile, seqFile, distance):
self.listFile = listFile
self.sequenceFile = seqFile
self.distance = DEFAULT_DIST if distance is None else distance
self.output = sys.stdout
    def validate_settings(self):
        """Validate input/output paths and locate required external tools."""
        # Check the values of the supplied input and output files
        self.listFile = validateInputFile( self.listFile, ['.list'])
        self.ccsFile = validateInputFile( self.ccsFile, ['.fastq'])
        self.sequenceFile = validateInputFile( self.sequenceFile,
                                              ['.bas.h5', '.fofn'])
        self.outputFile = validateOutputFile( self.outputFile )
        try: # Check the value of the numProc argument
            assert self.numProc >= 1
        except AssertionError:
            raise ValueError('numProc must be a positive integer!')
        # Find all of external tools we will need to use
        self.filterPlsH5 = which('filterPlsH5.py')
        self.compareSequences = which('compareSequences.py')
        self.cmph5tools = which('cmph5tools.py')
        self.loadPulses = which('loadPulses')
        self.variantCaller = which('variantCaller.py')
    def initialize_output(self):
        """Create the intermediate-file directory and chdir into it.

        NOTE(review): changes the process working directory as a side
        effect, which affects all later relative paths.
        """
        createDirectory( self.outputDir )
        os.chdir( self.outputDir )
#################
# Class Methods #
#################
@classmethod
def convertDistance(cls, distance):
try:
distance = 'unique' if distance == 'unique' else float(distance)
except:
raise ValueError('"%s" is not a valid distance!' % parts[0])
return distance
####################
# Instance Methods #
####################
def parseDistances(self):
distances = []
with open( self.listFile, 'r' ) as handle:
for line in handle:
parts = line.split()
distance = self.convertDistance( parts[0] )
distances.append( distance )
return distances
    def parse_sequence_data(self):
        """Index the CCS fastq records by their ZMW identifier.

        Builds self.sequenceData mapping ZMW id -> fastq record; if the
        same ZMW appears more than once, the last record wins.
        """
        self.sequenceData = {}
        for fastqRecord in FastqReader( self.ccsFile ):
            zmw = getZmw( fastqRecord.name )
            self.sequenceData[zmw] = fastqRecord
    def selectDistance(self, distances):
        """Choose the clustering distance to use from those available.

        Returns self.distance when it appears in *distances*; otherwise
        falls back to the largest numeric distance smaller than the
        requested one ('unique' entries are excluded from the fallback).
        Raises ValueError when no such fallback exists.
        """
        # If our selected distance is present, simply return it
        if self.distance in distances:
            return self.distance
        # Otherwise find the largest clustering distance smaller than
        # the specified distance and return that
        possible = [d for d in distances if d != 'unique']
        # NOTE(review): assumes self.distance is numeric here; if it is
        # 'unique' this comparison relies on Python 2 mixed-type
        # ordering -- confirm callers never request 'unique' fallback.
        smaller = [d for d in possible if d < self.distance]
        if not smaller:
            raise ValueError('No valid clustering distances found!')
        return max(smaller)
    def parseClusters( self, targetDist ):
        """Return the clusters recorded for *targetDist* in the list-file.

        Each cluster is returned as a list of read-name strings.

        NOTE: implicitly returns None when *targetDist* never appears in
        the file; the AssertionError guards against a malformed file
        whose declared cluster count disagrees with its contents.
        """
        with open( self.listFile, 'r' ) as handle:
            for line in handle:
                # Skip lines until we find the target distance
                parts = line.split()
                currDist = self.convertDistance( parts[0] )
                if currDist != targetDist:
                    continue
                # Check that the number of clusters is concordant
                clusterCount = int(parts[1])
                clusters = parts[2:]
                assert len(clusters) == clusterCount
                # Convert the strings of clusters to Lists and return
                clusters = [c.split(',') for c in clusters]
                return clusters
def trimClusterNames(self, clusters):
trimmed = []
for cluster in clusters:
cluster = [getZmw(c) for c in cluster]
trimmed.append( frozenset(cluster) )
return trimmed
def getClusterReads(self, cluster):
reads = []
for ccsZmw in cluster:
try:
ccsRead = self.sequenceData[ccsZmw]
except KeyError:
#raise Warning("No CCS read found for '%s', skipping..." % ccsZmw)
continue
reads.append( ccsRead )
return reads
def findLongestRead(self, reads):
lengths = [len(read.sequence) for read in reads]
maxLength = max(lengths)
longestReads = [read for read in reads
if len(read.sequence) == maxLength]
return longestReads[0]
    def outputClusterWhitelist(self, cluster, count):
        """Write the cluster's ZMW ids to a whitelist file, one per line.

        Returns the filename; an existing file is reused untouched so an
        interrupted run can resume.
        """
        print "Creating Whitelist for Cluster #%s" % count
        whiteListFile = 'cluster%s_whitelist.txt' % count
        if os.path.exists( whiteListFile ):
            return whiteListFile
        with open( whiteListFile, 'w' ) as handle:
            for zmw in cluster:
                handle.write(zmw + '\n')
        return whiteListFile
    def outputClusterReference(self, reference, count):
        """Write the chosen reference read to a per-cluster fasta file.

        The record is renamed "Cluster<count>" so downstream alignments
        are grouped per cluster.  An existing file is reused so an
        interrupted run can resume.
        """
        print "Creating reference sequence for Cluster #%s" % count
        referenceFile = 'cluster%s_reference.fasta' % count
        if os.path.exists( referenceFile ):
            return referenceFile
        # Rename the "Reference" sequence to the cluster
        referenceFasta = FastaRecord("Cluster%s" % count,
                                     reference.sequence)
        with FastaWriter( referenceFile ) as handle:
            handle.writeRecord( referenceFasta )
        return referenceFile
    def outputRepresentativeRead(self, representativeRead, count):
        """Write the cluster's representative CCS read to its own fastq.

        Used for clusters too small to polish a consensus.  An existing
        file is reused so an interrupted run can resume.
        """
        print "Creating representative sequence file Cluster #%s" % count
        representativeFile = 'cluster%s_represent.fastq' % count
        if os.path.exists( representativeFile ):
            return representativeFile
        with FastqWriter( representativeFile ) as handle:
            handle.writeRecord( representativeRead )
        return representativeFile
    def createRgnH5(self, whiteListFile, count):
        """Build a region-table fofn restricted to the whitelisted ZMWs.

        Runs filterPlsH5.py with a ReadWhitelist filter (plus a minimum
        read score of 0.75) so later alignment only sees this cluster's
        reads.  Existing outputs are reused so a run can resume.
        """
        print "Creating Region Table for Cluster #%s" % count
        outputDir = 'cluster%s_regionTables' % count
        outputFofn = 'cluster%s_regionTables.fofn' % count
        if os.path.exists( outputDir ) and os.path.exists( outputFofn ):
            return outputFofn
        outputDirArg = '--outputDir=%s' % outputDir
        outputFofnArg = '--outputFofn=%s' % outputFofn
        filterArg = '--filter=ReadWhitelist=%s,MinReadScore=0.75' % whiteListFile
        p = subprocess.Popen( [self.filterPlsH5,
                               self.sequenceFile,
                               outputDirArg,
                               outputFofnArg,
                               filterArg] )
        p.wait()
        print "Region Table Created Successfully"
        return outputFofn
def createCmpH5(self, referenceFile, rgnH5File, count):
print "Creating a CMP.H5 for Cluster #%s" % count
cmpH5File = 'cluster%s.cmp.h5' % count
if os.path.exists( cmpH5File ):
return cmpH5File
p = subprocess.Popen( [self.compareSequences,
'--minAccuracy=0.75',
'--minLength=500',
'--useQuality',
'--h5pbi',
'--info',
'--nproc=4',
'-x', '-bestn', '1',
'--nproc=%s' % self.numProc,
'--regionTable=%s' % rgnH5File,
'--algorithm=blasr',
'--noiseData=-77.27,0.08654,0.00121',
'--h5fn=%s' % cmpH5File,
self.sequenceFile,
referenceFile] )
p.wait()
return cmpH5File
    def sortCmpH5(self, cmph5File, count):
        """Sort the cluster's cmp.h5 with cmph5tools.

        Returns the sorted filename; an existing sorted file is reused
        so an interrupted run can resume.
        """
        print "Sorting the CmpH5 for Cluster #%s" % count
        sortedCmpH5File = 'cluster%s.sorted.cmp.h5' % count
        if os.path.exists( sortedCmpH5File ):
            return sortedCmpH5File
        p = subprocess.Popen( [self.cmph5tools,
                               'sort',
                               '--outFile=%s' % sortedCmpH5File,
                               cmph5File] )
        p.wait()
        return sortedCmpH5File
def loadPulsesIntoCmpH5(self, sortedCmpH5File, count):
print "Loading pulse data into the CmpH5 for Cluster #%s" % count
p = subprocess.Popen( [self.loadPulses,
self.sequenceFile,
sortedCmpH5File,
'-metrics',
PULSE_METRICS] )
return True
    def runQuiver(self, referenceFile, sortedCmpH5File, count):
        """Polish a consensus with variantCaller.py's Quiver algorithm.

        Returns the consensus fastq filename; an existing file is reused
        so an interrupted run can resume.
        """
        print "Running Quiver-consensus on Cluster #%s" % count
        consensusFile = 'cluster%s_consensus.fastq' % count
        if os.path.exists( consensusFile ):
            return consensusFile
        p = subprocess.Popen( [self.variantCaller,
                               '--algorithm=quiver',
                               '--numWorkers=%s' % self.numProc,
                               '--reference=%s' % referenceFile,
                               '--outputFile=%s' % consensusFile,
                               sortedCmpH5File])
        p.wait()
        return consensusFile
def combineOutputSequences(self, outputSequenceFiles):
print "Combining Consensus and Representative sequences"
outputSequences = []
for filename in outputSequenceFiles:
for record in FastqReader( filename ):
outputSequences.append( record )
return outputSequences
    def outputCombinedSequences(self, combinedSequences ):
        """Write all combined fastq records to the final output file."""
        print "Writing finished sequences to file"
        with FastqWriter( self.outputFile ) as handle:
            for record in combinedSequences:
                handle.writeRecord( record )
def __call__(self):
outputSequenceFiles = []
# Select the appropriate distance, and parse the matching clusters
distances = self.parseDistances()
distance = self.selectDistance( distances )
clusters = self.parseClusters( distance )
trimmedClusters = self.trimClusterNames( clusters )
# Iterate over the clusters, generating consensuses
for count, cluster in enumerate( trimmedClusters ):
count = str(count+1).zfill(4)
print "Analyzing cluster #%s now..." % (count)
reads = self.getClusterReads( cluster )
# If we have enought reads
if len(reads) >= self.minClusterSize:
print "%s ZMWs found (of %s), generating consensus..." % \
(len(reads), len(cluster))
# First we select the longest CCS read from the cluster
longest = self.findLongestRead( reads )
referenceFile = self.outputClusterReference( longest, count )
# Second we create a Rng.H5 file to mask other reads from Blasr
whiteListFile = self.outputClusterWhitelist( cluster, count )
rgnH5File = self.createRgnH5( whiteListFile, count )
# Third we create a sorted CmpH5
cmpH5File = self.createCmpH5( referenceFile, rgnH5File, count )
sortedCmpH5File = self.sortCmpH5( cmpH5File, count )
# Fourth we load rich QV data and run Quiver
self.loadPulsesIntoCmpH5( sortedCmpH5File, count )
consensusFile = self.runQuiver( referenceFile,
sortedCmpH5File,
count )
# Finally we record the name of the output file
outputSequenceFiles.append( consensusFile )
# Otherwise, we select one "Best" read to represent the cluster
else:
print "%s ZMWs found (of %s), skipping consensus..." % \
(len(reads), len(cluster))
reads = self.getClusterReads( cluster )
representRead = reads[0]
representFile = self.outputRepresentativeRead( representRead,
count )
outputSequenceFiles.append( representFile )
# Finally we combine and trim all of the output Files
combinedSequences = self.combineOutputSequences( outputSequenceFiles )
self.outputCombinedSequences( combinedSequences )
if __name__ == '__main__':
    # Script entry point: settings come from the command line, parsed
    # inside rDnaResequencer's initializer; calling the instance runs
    # the whole pipeline.
    resequencer = rDnaResequencer()
    resequencer()
| {
"repo_name": "lufuhao/PB_rDnaTools",
"path": "src/pbrdna/rDnaResequencer.py",
"copies": "2",
"size": "16747",
"license": "bsd-3-clause",
"hash": -8621925160506433000,
"line_mean": 44.2621621622,
"line_max": 103,
"alpha_frac": 0.5759240461,
"autogenerated": false,
"ratio": 4.547108335595982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6123032381695982,
"avg_score": null,
"num_lines": null
} |
#! $ANDROID_HOME/tools/bin monkeyrunner
# -*- coding: utf-8 -*-
'''uiparser'''
import os
import sys
import subprocess
import datetime
import logging
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage #pylint: disable=import-error
class NullHandler(logging.Handler):
    """No-op logging handler.

    Needed because monkeyrunner's Jython 2.5 predates the stdlib
    logging.NullHandler introduced in Python 2.7.
    """
    def emit(self, record):
        pass
logging.getLogger(__name__).addHandler(NullHandler())
logging.basicConfig(level=logging.DEBUG)
SHORT = 1
MIDDLE = 5
LONG = 15
ADB = os.path.join(os.environ['ANDROID_HOME'], 'platform-tools', 'adb')
# Example of Ctrip Android Apk
TARGET_PACKAGE = 'ctrip.android.view'
LAUNCH_ACTIVITY = 'ctrip.business.splash.CtripSplashActivity'
HOME_ACTIVITY = 'ctrip.android.publicproduct.home.view.CtripHomeActivity'
FLIGHT_ACTIVITY = 'ctrip.android.flight.view.inland.FlightInquireActivity'
START_COMPONENT = TARGET_PACKAGE + '/' + LAUNCH_ACTIVITY
DEVICE_DIR = '/sdcard/uiparser/'
HOST_DIR = './'
def capture(device, index):
    '''Dump the current UI hierarchy and a screenshot for one step.

    Writes <index>.xml to HOST_DIR (produced on the device by
    `adb shell uiautomator dump` and pulled back) and <index>.png via
    MonkeyRunner's takeSnapshot.
    '''
    _dumpXML = DEVICE_DIR + index + '.xml'
    _localXML = HOST_DIR + index + '.xml'
    _localImage = HOST_DIR + index + '.png'
    _shell = [ADB, 'shell', 'uiautomator', 'dump', _dumpXML]
    logging.debug(datetime.datetime.now())
    subprocess.call(_shell) # NOTE: uiautomator dump is flaky and may fail here
    logging.debug(datetime.datetime.now())
    #MonkeyRunner.sleep(MIDDLE)
    _shell = [ADB, 'pull', _dumpXML, _localXML]
    subprocess.call(_shell)
    _image = device.takeSnapshot()
    _image.writeToFile(_localImage, 'png')
def uiparser():
    '''Main Entry: connect to a device, reset the on-device working
    directory, launch the target activity, and capture its initial UI
    state.'''
    device = MonkeyRunner.waitForConnection(MIDDLE)
    # Recreate a clean scratch directory on the device
    _shell = [ADB, 'shell', 'rm', '-rf', DEVICE_DIR]
    subprocess.call(_shell)
    _shell = [ADB, 'shell', 'mkdir', '-p', DEVICE_DIR]
    subprocess.call(_shell)
    device.startActivity(component=START_COMPONENT)
    MonkeyRunner.sleep(MIDDLE)  # give the splash screen time to settle
    capture(device, str(0))
if __name__ == "__main__":
# MonkeyRunner Jython version is 2.5.3 (Outdated!)
logging.info(sys.version)
uiparser()
| {
"repo_name": "9468305/script",
"path": "monkeyrunner/uiparser.py",
"copies": "1",
"size": "1999",
"license": "mit",
"hash": -8114209801657805000,
"line_mean": 26.0135135135,
"line_max": 106,
"alpha_frac": 0.6858429215,
"autogenerated": false,
"ratio": 3.2241935483870967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9401937186524962,
"avg_score": 0.0016198566724267658,
"num_lines": 74
} |
### $ANTLR 2.7.6 (2005-12-22): "yacc.g" -> "YaccLexer.py"$
### import antlr and other modules ..
import sys
import antlr
version = sys.version.split()[0]
if version < '2.2.1':
False = 0
if version < '2.3':
True = not False
### header action >>>
### header action <<<
### preamble action >>>
### preamble action <<<
### >>>The Literals<<<
literals = {}
### import antlr.Token
from antlr import Token
### >>>The Known Token Types <<<
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
ID = 4
COLON = 5
SEMICOLON = 6
CHAR = 7
STRING = 8
ERROR = 9
PREC = 10
ACTION = 11
OR = 12
HYPHEN = 13
CARROT = 14
BANG = 15
LETTER = 16
DIGIT = 17
COMMENT = 18
WS = 19
class Lexer(antlr.CharScanner) :
### user action >>>
### user action <<<
def __init__(self, *argv, **kwargs) :
antlr.CharScanner.__init__(self, *argv, **kwargs)
self.caseSensitiveLiterals = True
self.setCaseSensitive(True)
self.literals = literals
def nextToken(self):
while True:
try: ### try again ..
while True:
_token = None
_ttype = INVALID_TYPE
self.resetText()
try: ## for char stream error handling
try: ##for lexical error handling
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in u':':
pass
self.mCOLON(True)
theRetToken = self._returnToken
elif la1 and la1 in u';':
pass
self.mSEMICOLON(True)
theRetToken = self._returnToken
elif la1 and la1 in u'-':
pass
self.mHYPHEN(True)
theRetToken = self._returnToken
elif la1 and la1 in u'^':
pass
self.mCARROT(True)
theRetToken = self._returnToken
elif la1 and la1 in u'!':
pass
self.mBANG(True)
theRetToken = self._returnToken
elif la1 and la1 in u'|':
pass
self.mOR(True)
theRetToken = self._returnToken
elif la1 and la1 in u'%':
pass
self.mPREC(True)
theRetToken = self._returnToken
elif la1 and la1 in u'\'':
pass
self.mCHAR(True)
theRetToken = self._returnToken
elif la1 and la1 in u'"':
pass
self.mSTRING(True)
theRetToken = self._returnToken
elif la1 and la1 in u'{':
pass
self.mACTION(True)
theRetToken = self._returnToken
elif la1 and la1 in u'/':
pass
self.mCOMMENT(True)
theRetToken = self._returnToken
elif la1 and la1 in u'\t\n\r ':
pass
self.mWS(True)
theRetToken = self._returnToken
else:
if (self.LA(1)==u'e') and (self.LA(2)==u'r') and (self.LA(3)==u'r') and (self.LA(4)==u'o') and (self.LA(5)==u'r') and (True) and (True):
pass
self.mERROR(True)
theRetToken = self._returnToken
elif (_tokenSet_0.member(self.LA(1))) and (True) and (True) and (True) and (True) and (True) and (True):
pass
self.mID(True)
theRetToken = self._returnToken
else:
self.default(self.LA(1))
if not self._returnToken:
raise antlr.TryAgain ### found SKIP token
### option { testLiterals=true }
self.testForLiteral(self._returnToken)
### return token to caller
return self._returnToken
### handle lexical errors ....
except antlr.RecognitionException, e:
raise antlr.TokenStreamRecognitionException(e)
### handle char stream errors ...
except antlr.CharStreamException,cse:
if isinstance(cse, antlr.CharStreamIOException):
raise antlr.TokenStreamIOException(cse.io)
else:
raise antlr.TokenStreamException(str(cse))
except antlr.TryAgain:
pass
def mCOLON(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = COLON
_saveIndex = 0
pass
self.match(':')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mSEMICOLON(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = SEMICOLON
_saveIndex = 0
pass
self.match(';')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mHYPHEN(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = HYPHEN
_saveIndex = 0
pass
self.match('-')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mCARROT(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = CARROT
_saveIndex = 0
pass
self.match('^')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mBANG(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = BANG
_saveIndex = 0
pass
self.match('!')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mOR(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = OR
_saveIndex = 0
pass
self.match('|')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mPREC(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = PREC
_saveIndex = 0
pass
self.match("%prec")
self.set_return_token(_createToken, _token, _ttype, _begin)
def mERROR(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = ERROR
_saveIndex = 0
pass
self.match("error")
self.set_return_token(_createToken, _token, _ttype, _begin)
def mID(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = ID
_saveIndex = 0
pass
self.mLETTER(False)
while True:
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in u'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz':
pass
self.mLETTER(False)
elif la1 and la1 in u'0123456789':
pass
self.mDIGIT(False)
elif la1 and la1 in u'_':
pass
self.match('_')
elif la1 and la1 in u'.':
pass
self.match('.')
else:
break
self.set_return_token(_createToken, _token, _ttype, _begin)
def mLETTER(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = LETTER
_saveIndex = 0
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in u'abcdefghijklmnopqrstuvwxyz':
pass
self.matchRange(u'a', u'z')
elif la1 and la1 in u'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
pass
self.matchRange(u'A', u'Z')
else:
self.raise_NoViableAlt(self.LA(1))
self.set_return_token(_createToken, _token, _ttype, _begin)
def mDIGIT(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = DIGIT
_saveIndex = 0
pass
pass
self.matchRange(u'0', u'9')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mCHAR(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = CHAR
_saveIndex = 0
pass
self.match('\'')
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in u'\\':
pass
pass
self.match('\\')
self.matchNot(antlr.EOF_CHAR)
elif la1 and la1 in u'\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\t\n\u000b\u000c\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f !"#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f':
pass
pass
self.match(_tokenSet_1)
else:
self.raise_NoViableAlt(self.LA(1))
self.match('\'')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mSTRING(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = STRING
_saveIndex = 0
pass
self.match('"')
while True:
if (_tokenSet_2.member(self.LA(1))):
pass
self.matchNot('"')
else:
break
self.match('"')
self.set_return_token(_createToken, _token, _ttype, _begin)
def mACTION(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = ACTION
_saveIndex = 0
pass
self.match('{')
lcount= 1
incomment= False
indquote= False
insquote= False
while lcount != 0:
if self.LA(1) == '\\':
self.consume()
elif self.LA(1) == '/' and self.LA(2) == '*':
if not indquote and not insquote:
incomment= True
self.consume()
elif self.LA(1) == '*' and self.LA(2) == '/':
if not indquote and not insquote:
incomment= False
self.consume()
elif self.LA(1) == '\'':
if not indquote and not incomment:
insquote= not insquote
elif self.LA(1) == '"':
if not insquote and not incomment:
indquote= not indquote
elif self.LA(1) == antlr.EOF:
_ttype = antlr.EOF
elif self.LA(1) == '\n':
self.newline()
elif not indquote and not insquote and not incomment:
if self.LA(1)== '}':
lcount -= 1
elif self.LA(1)== '{':
lcount += 1
self.consume()
_ttype = antlr.SKIP;
self.set_return_token(_createToken, _token, _ttype, _begin)
def mCOMMENT(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = COMMENT
_saveIndex = 0
pass
self.match("/*")
while True:
if (self.LA(1)==u'*') and (_tokenSet_3.member(self.LA(2))):
pass
pass
self.match('*')
self.match(_tokenSet_3)
elif (_tokenSet_4.member(self.LA(1))):
pass
self.match(_tokenSet_4)
else:
break
self.match("*/")
_ttype = antlr.SKIP
self.set_return_token(_createToken, _token, _ttype, _begin)
def mWS(self, _createToken):
_ttype = 0
_token = None
_begin = self.text.length()
_ttype = WS
_saveIndex = 0
pass
_cnt50= 0
while True:
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in u' ':
pass
self.match(' ')
elif la1 and la1 in u'\r':
pass
self.match('\r')
self.match('\n')
self.newline()
elif la1 and la1 in u'\n':
pass
self.match('\n')
self.newline()
elif la1 and la1 in u'\t':
pass
self.match('\t')
else:
break
_cnt50 += 1
if _cnt50 < 1:
self.raise_NoViableAlt(self.LA(1))
_ttype = antlr.SKIP
self.set_return_token(_createToken, _token, _ttype, _begin)
### generate bit set
def mk_tokenSet_0():
### var1
data = [ 0L, 576460743847706622L, 0L, 0L]
return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
### generate bit set
def mk_tokenSet_1():
### var1
data = [ -549755813889L, -268435457L, 0L, 0L]
return data
_tokenSet_1 = antlr.BitSet(mk_tokenSet_1())
### generate bit set
def mk_tokenSet_2():
### var1
data = [ -17179869185L, -1L, 0L, 0L]
return data
_tokenSet_2 = antlr.BitSet(mk_tokenSet_2())
### generate bit set
def mk_tokenSet_3():
### var1
data = [ -140737488355329L, -1L, 0L, 0L]
return data
_tokenSet_3 = antlr.BitSet(mk_tokenSet_3())
### generate bit set
def mk_tokenSet_4():
### var1
data = [ -4398046511105L, -1L, 0L, 0L]
return data
_tokenSet_4 = antlr.BitSet(mk_tokenSet_4())
### __main__ header action >>>
if __name__ == '__main__' :
import sys
import antlr
import YaccLexer
### create lexer - shall read from stdin
try:
for token in YaccLexer.Lexer():
print token
except antlr.TokenStreamException, e:
print "error: exception caught while lexing: ", e
### __main__ header action <<<
| {
"repo_name": "darkrsw/safe",
"path": "lib/deckard/src/ptgen/YaccLexer.py",
"copies": "3",
"size": "15607",
"license": "bsd-3-clause",
"hash": 4365403030418061000,
"line_mean": 31.3126293996,
"line_max": 311,
"alpha_frac": 0.441724867,
"autogenerated": false,
"ratio": 4.1897986577181205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.613152352471812,
"avg_score": null,
"num_lines": null
} |
### $ANTLR 2.7.6 (2005-12-22): "yacc.g" -> "YaccParser.py"$
### import antlr and other modules ..
import sys
import antlr
version = sys.version.split()[0]
if version < '2.2.1':
False = 0
if version < '2.3':
True = not False
### header action >>>
### header action <<<
### preamble action>>>
### preamble action <<<
### import antlr.Token
from antlr import Token
### >>>The Known Token Types <<<
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
ID = 4
COLON = 5
SEMICOLON = 6
CHAR = 7
STRING = 8
ERROR = 9
PREC = 10
ACTION = 11
OR = 12
HYPHEN = 13
CARROT = 14
BANG = 15
LETTER = 16
DIGIT = 17
COMMENT = 18
WS = 19
class Parser(antlr.LLkParser):
### user action >>>
### user action <<<
def __init__(self, *args, **kwargs):
antlr.LLkParser.__init__(self, *args, **kwargs)
self.tokenNames = _tokenNames
### __init__ header action >>>
self.NonTerminals= set([])
self.Terminals= set([])
self.Rules=[]
### __init__ header action <<<
def grammar(self):
try: ## for error handling
pass
_cnt3= 0
while True:
if (self.LA(1)==ID):
pass
self.rule()
else:
break
_cnt3 += 1
if _cnt3 < 1:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.match(EOF_TYPE)
except antlr.RecognitionException, ex:
self.reportError(ex)
self.consume()
self.consumeUntil(_tokenSet_0)
def rule(self):
id = None
try: ## for error handling
pass
pass
id = self.LT(1)
self.match(ID)
self.NonTerminals.add(id.getText())
self.match(COLON)
self.rhs(id.getText())
self.match(SEMICOLON)
except antlr.RecognitionException, ex:
self.reportError(ex)
self.consume()
self.consumeUntil(_tokenSet_1)
def rhs(self,
lhs
):
id = None
c = None
str = None
pi = None
pc = None
right=[]
try: ## for error handling
pass
while True:
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [ID]:
pass
pass
id = self.LT(1)
self.match(ID)
right.append(("node",id.getText()))
if id.getText() == id.getText().lower():
self.NonTerminals.add(id.getText())
else:
self.Terminals.add(id.getText())
elif la1 and la1 in [CHAR]:
pass
pass
c = self.LT(1)
self.match(CHAR)
right.append(("node",c.getText()))
self.Terminals.add(c.getText())
elif la1 and la1 in [STRING]:
pass
pass
str = self.LT(1)
self.match(STRING)
right.append(("node",str.getText()))
self.Terminals.add(str.getText())
elif la1 and la1 in [ERROR]:
pass
self.match(ERROR)
right.append(("error","error"))
elif la1 and la1 in [PREC]:
pass
self.match(PREC)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [ID]:
pass
pass
pi = self.LT(1)
self.match(ID)
right.append(("%prec","%prec "+pi.getText()))
elif la1 and la1 in [CHAR]:
pass
pass
pc = self.LT(1)
self.match(CHAR)
right.append(("%prec","%prec "+pc.getText()))
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
elif la1 and la1 in [ACTION]:
pass
self.match(ACTION)
else:
break
self.Rules.append( (lhs,right) )
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [OR]:
pass
self.match(OR)
self.rhs(lhs)
elif la1 and la1 in [SEMICOLON]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
except antlr.RecognitionException, ex:
self.reportError(ex)
self.consume()
self.consumeUntil(_tokenSet_2)
def rulespec(self):
try: ## for error handling
pass
self.match(HYPHEN)
except antlr.RecognitionException, ex:
self.reportError(ex)
self.consume()
self.consumeUntil(_tokenSet_0)
def treespec(self):
try: ## for error handling
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [CARROT]:
pass
self.match(CARROT)
elif la1 and la1 in [BANG]:
pass
self.match(BANG)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
except antlr.RecognitionException, ex:
self.reportError(ex)
self.consume()
self.consumeUntil(_tokenSet_0)
_tokenNames = [
"<0>",
"EOF",
"<2>",
"NULL_TREE_LOOKAHEAD",
"ID",
"COLON",
"SEMICOLON",
"CHAR",
"STRING",
"ERROR",
"PREC",
"ACTION",
"OR",
"HYPHEN",
"CARROT",
"BANG",
"LETTER",
"DIGIT",
"COMMENT",
"WS"
]
### generate bit set
def mk_tokenSet_0():
### var1
data = [ 2L, 0L]
return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
### generate bit set
def mk_tokenSet_1():
### var1
data = [ 18L, 0L]
return data
_tokenSet_1 = antlr.BitSet(mk_tokenSet_1())
### generate bit set
def mk_tokenSet_2():
### var1
data = [ 64L, 0L]
return data
_tokenSet_2 = antlr.BitSet(mk_tokenSet_2())
| {
"repo_name": "daejunpark/jsaf",
"path": "third_party/deckard/src/ptgen/YaccParser.py",
"copies": "3",
"size": "7103",
"license": "bsd-3-clause",
"hash": 5657232878070735000,
"line_mean": 25.405204461,
"line_max": 92,
"alpha_frac": 0.4237646065,
"autogenerated": false,
"ratio": 4.2004730928444705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.612423769934447,
"avg_score": null,
"num_lines": null
} |
### $ANTLR 2.7.7 (20060930): "xlwt/excel-formula.g" -> "ExcelFormulaParser.py"$
### import antlr and other modules ..
import sys
from arelle.xlwt import antlr
### header action >>>
import struct
from arelle.xlwt import Utils
from arelle.xlwt.UnicodeUtils import upack1
from arelle.xlwt.ExcelMagic import *
_RVAdelta = {"R": 0, "V": 0x20, "A": 0x40}
_RVAdeltaRef = {"R": 0, "V": 0x20, "A": 0x40, "D": 0x20}
_RVAdeltaArea = {"R": 0, "V": 0x20, "A": 0x40, "D": 0}
class FormulaParseException(Exception):
"""
An exception indicating that a Formula could not be successfully parsed.
"""
### header action <<<
### preamble action>>>
### preamble action <<<
### import antlr.Token
from arelle.xlwt.antlr import Token
### >>>The Known Token Types <<<
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
TRUE_CONST = 4
FALSE_CONST = 5
STR_CONST = 6
NUM_CONST = 7
INT_CONST = 8
FUNC_IF = 9
FUNC_CHOOSE = 10
NAME = 11
QUOTENAME = 12
EQ = 13
NE = 14
GT = 15
LT = 16
GE = 17
LE = 18
ADD = 19
SUB = 20
MUL = 21
DIV = 22
POWER = 23
PERCENT = 24
LP = 25
RP = 26
LB = 27
RB = 28
COLON = 29
COMMA = 30
SEMICOLON = 31
REF2D = 32
REF2D_R1C1 = 33
BANG = 34
CONCAT = 35
class Parser(antlr.LLkParser):
### user action >>>
### user action <<<
def __init__(self, *args, **kwargs):
antlr.LLkParser.__init__(self, *args, **kwargs)
self.tokenNames = _tokenNames
### __init__ header action >>>
self.rpn = b""
self.sheet_references = []
self.xcall_references = []
### __init__ header action <<<
def formula(self):
self.expr("V")
def expr(self, arg_type):
self.prec0_expr(arg_type)
while True:
if ((self.LA(1) >= EQ and self.LA(1) <= LE)):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [EQ]:
pass
self.match(EQ)
op = struct.pack('B', ptgEQ)
elif la1 and la1 in [NE]:
pass
self.match(NE)
op = struct.pack('B', ptgNE)
elif la1 and la1 in [GT]:
pass
self.match(GT)
op = struct.pack('B', ptgGT)
elif la1 and la1 in [LT]:
pass
self.match(LT)
op = struct.pack('B', ptgLT)
elif la1 and la1 in [GE]:
pass
self.match(GE)
op = struct.pack('B', ptgGE)
elif la1 and la1 in [LE]:
pass
self.match(LE)
op = struct.pack('B', ptgLE)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.prec0_expr(arg_type)
self.rpn += op
else:
break
def prec0_expr(self,
arg_type
):
pass
self.prec1_expr(arg_type)
while True:
if (self.LA(1)==CONCAT):
pass
pass
self.match(CONCAT)
op = struct.pack('B', ptgConcat)
self.prec1_expr(arg_type)
self.rpn += op
else:
break
def prec1_expr(self,
arg_type
):
pass
self.prec2_expr(arg_type)
while True:
if (self.LA(1)==ADD or self.LA(1)==SUB):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [ADD]:
pass
self.match(ADD)
op = struct.pack('B', ptgAdd)
elif la1 and la1 in [SUB]:
pass
self.match(SUB)
op = struct.pack('B', ptgSub)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.prec2_expr(arg_type)
self.rpn += op;
# print "**prec1_expr4 %s" % arg_type
else:
break
def prec2_expr(self,
arg_type
):
pass
self.prec3_expr(arg_type)
while True:
if (self.LA(1)==MUL or self.LA(1)==DIV):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [MUL]:
pass
self.match(MUL)
op = struct.pack('B', ptgMul)
elif la1 and la1 in [DIV]:
pass
self.match(DIV)
op = struct.pack('B', ptgDiv)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.prec3_expr(arg_type)
self.rpn += op
else:
break
def prec3_expr(self,
arg_type
):
pass
self.prec4_expr(arg_type)
while True:
if (self.LA(1)==POWER):
pass
pass
self.match(POWER)
op = struct.pack('B', ptgPower)
self.prec4_expr(arg_type)
self.rpn += op
else:
break
def prec4_expr(self,
arg_type
):
pass
self.prec5_expr(arg_type)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [PERCENT]:
pass
self.match(PERCENT)
self.rpn += struct.pack('B', ptgPercent)
elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,RP,COMMA,SEMICOLON,CONCAT]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def prec5_expr(self,
arg_type
):
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,LP,REF2D]:
pass
self.primary(arg_type)
elif la1 and la1 in [SUB]:
pass
self.match(SUB)
self.primary(arg_type)
self.rpn += struct.pack('B', ptgUminus)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def primary(self,
arg_type
):
str_tok = None
int_tok = None
num_tok = None
ref2d_tok = None
ref2d1_tok = None
ref2d2_tok = None
ref3d_ref2d = None
ref3d_ref2d2 = None
name_tok = None
func_tok = None
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST]:
pass
self.match(TRUE_CONST)
self.rpn += struct.pack("2B", ptgBool, 1)
elif la1 and la1 in [FALSE_CONST]:
pass
self.match(FALSE_CONST)
self.rpn += struct.pack("2B", ptgBool, 0)
elif la1 and la1 in [STR_CONST]:
pass
str_tok = self.LT(1)
self.match(STR_CONST)
self.rpn += struct.pack("B", ptgStr) + upack1(str_tok.text[1:-1].replace("\"\"", "\""))
elif la1 and la1 in [NUM_CONST]:
pass
num_tok = self.LT(1)
self.match(NUM_CONST)
self.rpn += struct.pack("<Bd", ptgNum, float(num_tok.text))
elif la1 and la1 in [FUNC_IF]:
pass
self.match(FUNC_IF)
self.match(LP)
self.expr("V")
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<BBH", ptgAttr, 0x02, 0) # tAttrIf
pos0 = len(self.rpn) - 2
self.expr(arg_type)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 0) # tAttrSkip
pos1 = len(self.rpn) - 2
self.rpn = self.rpn[:pos0] + struct.pack("<H", pos1-pos0) + self.rpn[pos0+2:]
self.expr(arg_type)
self.match(RP)
self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 3) # tAttrSkip
self.rpn += struct.pack("<BBH", ptgFuncVarR, 3, 1) # 3 = nargs, 1 = IF func
pos2 = len(self.rpn)
self.rpn = self.rpn[:pos1] + struct.pack("<H", pos2-(pos1+2)-1) + self.rpn[pos1+2:]
elif la1 and la1 in [FUNC_CHOOSE]:
pass
self.match(FUNC_CHOOSE)
arg_type = b"R"
rpn_chunks = []
self.match(LP)
self.expr("V")
rpn_start = len(self.rpn)
ref_markers = [len(self.sheet_references)]
while True:
if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
mark = len(self.rpn)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
pass
self.expr(arg_type)
elif la1 and la1 in [RP,COMMA,SEMICOLON]:
pass
self.rpn += struct.pack("B", ptgMissArg)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
rpn_chunks.append(self.rpn[mark:])
ref_markers.append(len(self.sheet_references))
else:
break
self.match(RP)
self.rpn = self.rpn[:rpn_start]
nc = len(rpn_chunks)
chunklens = [len(chunk) for chunk in rpn_chunks]
skiplens = [0] * nc
skiplens[-1] = 3
for ic in range(nc-1, 0, -1):
skiplens[ic-1] = skiplens[ic] + chunklens[ic] + 4
jump_pos = [2 * nc + 2]
for ic in range(nc):
jump_pos.append(jump_pos[-1] + chunklens[ic] + 4)
chunk_shift = 2 * nc + 6 # size of tAttrChoose
for ic in range(nc):
for refx in range(ref_markers[ic], ref_markers[ic+1]):
ref = self.sheet_references[refx]
self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift)
chunk_shift += 4 # size of tAttrSkip
choose_rpn = []
choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x04, nc)) # 0x04 is tAttrChoose
choose_rpn.append(struct.pack("<%dH" % (nc+1), *jump_pos))
for ic in range(nc):
choose_rpn.append(rpn_chunks[ic])
choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x08, skiplens[ic])) # 0x08 is tAttrSkip
choose_rpn.append(struct.pack("<BBH", ptgFuncVarV, nc+1, 100)) # 100 is CHOOSE fn
self.rpn += b"".join(choose_rpn)
elif la1 and la1 in [LP]:
pass
self.match(LP)
self.expr(arg_type)
self.match(RP)
self.rpn += struct.pack("B", ptgParen)
else:
if (self.LA(1)==INT_CONST) and (_tokenSet_0.member(self.LA(2))):
pass
int_tok = self.LT(1)
self.match(INT_CONST)
# print "**int_const", int_tok.text
int_value = int(int_tok.text)
if int_value <= 65535:
self.rpn += struct.pack("<BH", ptgInt, int_value)
else:
self.rpn += struct.pack("<Bd", ptgNum, float(int_value))
elif (self.LA(1)==REF2D) and (_tokenSet_0.member(self.LA(2))):
pass
ref2d_tok = self.LT(1)
self.match(REF2D)
# print "**ref2d %s %s" % (ref2d_tok.text, arg_type)
r, c = Utils.cell_to_packed_rowcol(ref2d_tok.text)
ptg = ptgRefR + _RVAdeltaRef[arg_type]
self.rpn += struct.pack("<B2H", ptg, r, c)
elif (self.LA(1)==REF2D) and (self.LA(2)==COLON):
pass
ref2d1_tok = self.LT(1)
self.match(REF2D)
self.match(COLON)
ref2d2_tok = self.LT(1)
self.match(REF2D)
r1, c1 = Utils.cell_to_packed_rowcol(ref2d1_tok.text)
r2, c2 = Utils.cell_to_packed_rowcol(ref2d2_tok.text)
ptg = ptgAreaR + _RVAdeltaArea[arg_type]
self.rpn += struct.pack("<B4H", ptg, r1, r2, c1, c2)
elif (self.LA(1)==INT_CONST or self.LA(1)==NAME or self.LA(1)==QUOTENAME) and (self.LA(2)==COLON or self.LA(2)==BANG):
pass
sheet1=self.sheet()
sheet2 = sheet1
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [COLON]:
pass
self.match(COLON)
sheet2=self.sheet()
elif la1 and la1 in [BANG]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.match(BANG)
ref3d_ref2d = self.LT(1)
self.match(REF2D)
ptg = ptgRef3dR + _RVAdeltaRef[arg_type]
rpn_ref2d = b""
r1, c1 = Utils.cell_to_packed_rowcol(ref3d_ref2d.text)
rpn_ref2d = struct.pack("<3H", 0x0000, r1, c1)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [COLON]:
pass
self.match(COLON)
ref3d_ref2d2 = self.LT(1)
self.match(REF2D)
ptg = ptgArea3dR + _RVAdeltaArea[arg_type]
r2, c2 = Utils.cell_to_packed_rowcol(ref3d_ref2d2.text)
rpn_ref2d = struct.pack("<5H", 0x0000, r1, r2, c1, c2)
elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,PERCENT,RP,COMMA,SEMICOLON,CONCAT]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<B", ptg)
self.sheet_references.append((sheet1, sheet2, len(self.rpn)))
self.rpn += rpn_ref2d
elif (self.LA(1)==NAME) and (_tokenSet_0.member(self.LA(2))):
name_tok = self.LT(1)
self.match(NAME)
raise Exception("[formula] found unexpected NAME token (%r)" % name_tok.txt)
# #### TODO: handle references to defined names here
elif (self.LA(1)==NAME) and (self.LA(2)==LP):
func_tok = self.LT(1)
self.match(NAME)
func_toku = func_tok.text.upper()
if func_toku in all_funcs_by_name:
(opcode,
min_argc,
max_argc,
func_type,
arg_type_str) = all_funcs_by_name[func_toku]
arg_type_list = list(arg_type_str)
else:
raise Exception("[formula] unknown function (%s)" % func_tok.text)
# print "**func_tok1 %s %s" % (func_toku, func_type)
xcall = opcode < 0
if xcall:
# The name of the add-in function is passed as the 1st arg
# of the hidden XCALL function
self.xcall_references.append((func_toku, len(self.rpn) + 1))
self.rpn += struct.pack("<BHHH",
ptgNameXR,
0xadde, # ##PATCHME## index to REF entry in EXTERNSHEET record
0xefbe, # ##PATCHME## one-based index to EXTERNNAME record
0x0000) # unused
self.match(LP)
arg_count=self.expr_list(arg_type_list, min_argc, max_argc)
self.match(RP)
if arg_count > max_argc or arg_count < min_argc:
raise Exception("%d parameters for function: %s" % (arg_count, func_tok.text))
if xcall:
func_ptg = ptgFuncVarR + _RVAdelta[func_type]
self.rpn += struct.pack("<2BH", func_ptg, arg_count + 1, 255) # 255 is magic XCALL function
elif min_argc == max_argc:
func_ptg = ptgFuncR + _RVAdelta[func_type]
self.rpn += struct.pack("<BH", func_ptg, opcode)
elif arg_count == 1 and func_tok.text.upper() == "SUM":
self.rpn += struct.pack("<BBH", ptgAttr, 0x10, 0) # tAttrSum
else:
func_ptg = ptgFuncVarR + _RVAdelta[func_type]
self.rpn += struct.pack("<2BH", func_ptg, arg_count, opcode)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def sheet(self):
ref = None
sheet_ref_name = None
sheet_ref_int = None
sheet_ref_quote = None
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [NAME]:
sheet_ref_name = self.LT(1)
self.match(NAME)
ref = sheet_ref_name.text
elif la1 and la1 in [INT_CONST]:
sheet_ref_int = self.LT(1)
self.match(INT_CONST)
ref = sheet_ref_int.text
elif la1 and la1 in [QUOTENAME]:
sheet_ref_quote = self.LT(1)
self.match(QUOTENAME)
ref = sheet_ref_quote.text[1:-1].replace("''", "'")
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
return ref
    def expr_list(self,
        arg_type_list, min_argc, max_argc
    ):
        """Parse a function's argument list; return the argument count.

        arg_type_list gives the required class per position; a "+" entry
        means "repeat the previous entry" for all further arguments.
        Empty argument slots (e.g. F(a,,b)) emit ptgMissArg.
        """
        arg_cnt = None
        arg_cnt = 0
        arg_type = arg_type_list[arg_cnt]
        # print "**expr_list1[%d] req=%s" % (arg_cnt, arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
            pass
            self.expr(arg_type)
            arg_cnt += 1
            while True:
                if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
                    pass
                    # pick the required class for this position, clamping to
                    # the last (or repeated) entry of arg_type_list
                    if arg_cnt < len(arg_type_list):
                        arg_type = arg_type_list[arg_cnt]
                    else:
                        arg_type = arg_type_list[-1]
                    if arg_type == "+":
                        arg_type = arg_type_list[-2]
                    # print "**expr_list2[%d] req=%s" % (arg_cnt, arg_type)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP,COMMA,SEMICOLON]:
                        pass
                        # empty argument slot
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    arg_cnt += 1
                else:
                    break
        elif la1 and la1 in [RP]:
            # empty argument list
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return arg_cnt
# Human-readable names for the token-type codes, indexed by code; assigned
# to self.tokenNames in Parser.__init__ (presumably consumed by the antlr
# runtime when formatting parse-error messages).
_tokenNames = [
    "<0>",
    "EOF",
    "<2>",
    "NULL_TREE_LOOKAHEAD",
    "TRUE_CONST",
    "FALSE_CONST",
    "STR_CONST",
    "NUM_CONST",
    "INT_CONST",
    "FUNC_IF",
    "FUNC_CHOOSE",
    "NAME",
    "QUOTENAME",
    "EQ",
    "NE",
    "GT",
    "LT",
    "GE",
    "LE",
    "ADD",
    "SUB",
    "MUL",
    "DIV",
    "POWER",
    "PERCENT",
    "LP",
    "RP",
    "LB",
    "RB",
    "COLON",
    "COMMA",
    "SEMICOLON",
    "REF2D",
    "REF2D_R1C1",
    "BANG",
    "CONCAT"
]
### generate bit set
def mk_tokenSet_0():
    """Return the generator-produced bit words for the set of token types
    that may follow a completed operand (consulted via
    _tokenSet_0.member(self.LA(2)) in primary() for LL(2) disambiguation).
    """
    ### var1
    data = [ 37681618946, 0]
    return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
| {
"repo_name": "sternshus/arelle2.7",
"path": "svr-2.7/arelle/xlwt/ExcelFormulaParser.py",
"copies": "1",
"size": "22206",
"license": "apache-2.0",
"hash": -1909032448531912000,
"line_mean": 32.6965098634,
"line_max": 143,
"alpha_frac": 0.4560929479,
"autogenerated": false,
"ratio": 3.5746941403734707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9486907363785178,
"avg_score": 0.008775944897658618,
"num_lines": 659
} |
### $ANTLR 2.7.7 (20060930): "xlwt/excel-formula.g" -> "ExcelFormulaParser.py"$
### import antlr and other modules ..
import sys
import antlr
version = sys.version.split()[0]
# Compatibility shim for ancient interpreters that predate the built-in
# booleans.  NOTE(review): lexicographic string comparison of version
# numbers -- adequate for the 2.x range targeted here, but not correct in
# general (e.g. '2.10' < '2.3' lexicographically).
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False
### header action >>>
import struct
import Utils
from UnicodeUtils import upack1
from ExcelMagic import *
# Opcode deltas added to a base ptg (e.g. ptgRefR, ptgAreaR) to select the
# variant matching the required argument class: "R"eference, "V"alue,
# "A"rray.  NOTE(review): the "D" entry maps differently in the Ref and
# Area tables -- presumably a special argument class; confirm vs ExcelMagic.
_RVAdelta = {"R": 0, "V": 0x20, "A": 0x40}
_RVAdeltaRef = {"R": 0, "V": 0x20, "A": 0x40, "D": 0x20}
_RVAdeltaArea = {"R": 0, "V": 0x20, "A": 0x40, "D": 0}
class FormulaParseException(Exception):
    """
    An exception indicating that a Formula could not be successfully parsed.

    NOTE(review): not raised anywhere in this module (the parser raises
    antlr exceptions and plain Exception); presumably raised by the code
    that wraps this parser.
    """
### preamble action>>>
### preamble action <<<
### import antlr.Token
from antlr import Token
### >>>The Known Token Types <<<
# Re-exported antlr runtime constants, followed by the generated token-type
# codes; the numeric values index into _tokenNames below.
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
TRUE_CONST = 4
FALSE_CONST = 5
STR_CONST = 6
NUM_CONST = 7
INT_CONST = 8
FUNC_IF = 9
FUNC_CHOOSE = 10
NAME = 11
QUOTENAME = 12
EQ = 13
NE = 14
GT = 15
LT = 16
GE = 17
LE = 18
ADD = 19
SUB = 20
MUL = 21
DIV = 22
POWER = 23
PERCENT = 24
LP = 25
RP = 26
LB = 27
RB = 28
COLON = 29
COMMA = 30
SEMICOLON = 31
REF2D = 32
REF2D_R1C1 = 33
BANG = 34
CONCAT = 35
class Parser(antlr.LLkParser):
### user action >>>
### user action <<<
    def __init__(self, *args, **kwargs):
        """Create the parser and reset the per-parse output accumulators."""
        antlr.LLkParser.__init__(self, *args, **kwargs)
        self.tokenNames = _tokenNames
        ### __init__ header action >>>
        # rpn: the binary RPN formula stream built up during the parse
        # (a plain str in this Python-2 variant)
        self.rpn = ""
        # (sheet1, sheet2, rpn_offset) triples; offsets later patched when
        # real EXTERNSHEET indexes are known (see ##PATCHME## notes below)
        self.sheet_references = []
        # (function_name, rpn_offset) pairs for add-in (XCALL) functions
        self.xcall_references = []
        ### __init__ header action <<<
def formula(self):
pass
self.expr("V")
    def expr(self,
        arg_type
    ):
        """Parse a comparison expression:
        prec0_expr ((EQ|NE|GT|LT|GE|LE) prec0_expr)*.

        Comparisons are the loosest-binding tier (this is the top-level
        rule called by formula()); each operator's ptg opcode byte is
        appended after its two operands' RPN.
        """
        pass
        self.prec0_expr(arg_type)
        while True:
            # EQ..LE are consecutive token codes, so a range test suffices
            if ((self.LA(1) >= EQ and self.LA(1) <= LE)):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [EQ]:
                    pass
                    self.match(EQ)
                    op = struct.pack('B', ptgEQ)
                elif la1 and la1 in [NE]:
                    pass
                    self.match(NE)
                    op = struct.pack('B', ptgNE)
                elif la1 and la1 in [GT]:
                    pass
                    self.match(GT)
                    op = struct.pack('B', ptgGT)
                elif la1 and la1 in [LT]:
                    pass
                    self.match(LT)
                    op = struct.pack('B', ptgLT)
                elif la1 and la1 in [GE]:
                    pass
                    self.match(GE)
                    op = struct.pack('B', ptgGE)
                elif la1 and la1 in [LE]:
                    pass
                    self.match(LE)
                    op = struct.pack('B', ptgLE)
                else:
                    # unreachable: the loop guard already checked EQ..LE
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec0_expr(arg_type)
                self.rpn += op
            else:
                break
def prec0_expr(self,
arg_type
):
pass
self.prec1_expr(arg_type)
while True:
if (self.LA(1)==CONCAT):
pass
pass
self.match(CONCAT)
op = struct.pack('B', ptgConcat)
self.prec1_expr(arg_type)
self.rpn += op
else:
break
    def prec1_expr(self,
        arg_type
    ):
        """Parse an additive expression: prec2_expr ((ADD | SUB) prec2_expr)*.

        The ptgAdd/ptgSub opcode byte is appended after both operands' RPN.
        """
        pass
        self.prec2_expr(arg_type)
        while True:
            if (self.LA(1)==ADD or self.LA(1)==SUB):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [ADD]:
                    pass
                    self.match(ADD)
                    op = struct.pack('B', ptgAdd)
                elif la1 and la1 in [SUB]:
                    pass
                    self.match(SUB)
                    op = struct.pack('B', ptgSub)
                else:
                    # unreachable: the loop guard already checked ADD/SUB
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec2_expr(arg_type)
                self.rpn += op;
                # print "**prec1_expr4 %s" % arg_type
            else:
                break
    def prec2_expr(self,
        arg_type
    ):
        """Parse a multiplicative expression: prec3_expr ((MUL | DIV) prec3_expr)*.

        The ptgMul/ptgDiv opcode byte is appended after both operands' RPN.
        """
        pass
        self.prec3_expr(arg_type)
        while True:
            if (self.LA(1)==MUL or self.LA(1)==DIV):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [MUL]:
                    pass
                    self.match(MUL)
                    op = struct.pack('B', ptgMul)
                elif la1 and la1 in [DIV]:
                    pass
                    self.match(DIV)
                    op = struct.pack('B', ptgDiv)
                else:
                    # unreachable: the loop guard already checked MUL/DIV
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec3_expr(arg_type)
                self.rpn += op
            else:
                break
def prec3_expr(self,
arg_type
):
pass
self.prec4_expr(arg_type)
while True:
if (self.LA(1)==POWER):
pass
pass
self.match(POWER)
op = struct.pack('B', ptgPower)
self.prec4_expr(arg_type)
self.rpn += op
else:
break
    def prec4_expr(self,
        arg_type
    ):
        """Parse a postfix-percent expression: prec5_expr PERCENT?.

        A trailing PERCENT emits ptgPercent; otherwise the lookahead must
        be one of the tokens that may legally follow an expression.
        """
        pass
        self.prec5_expr(arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [PERCENT]:
            pass
            self.match(PERCENT)
            self.rpn += struct.pack('B', ptgPercent)
        elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,RP,COMMA,SEMICOLON,CONCAT]:
            # follow-set of an expression: no percent sign, nothing to emit
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
    def prec5_expr(self,
        arg_type
    ):
        """Parse a unary expression: either a primary, or SUB primary.

        Unary minus emits ptgUminus after the operand's RPN.
        """
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,LP,REF2D]:
            pass
            self.primary(arg_type)
        elif la1 and la1 in [SUB]:
            pass
            self.match(SUB)
            self.primary(arg_type)
            self.rpn += struct.pack('B', ptgUminus)
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def primary(self,
arg_type
):
str_tok = None
int_tok = None
num_tok = None
ref2d_tok = None
ref2d1_tok = None
ref2d2_tok = None
ref3d_ref2d = None
ref3d_ref2d2 = None
name_tok = None
func_tok = None
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST]:
pass
self.match(TRUE_CONST)
self.rpn += struct.pack("2B", ptgBool, 1)
elif la1 and la1 in [FALSE_CONST]:
pass
self.match(FALSE_CONST)
self.rpn += struct.pack("2B", ptgBool, 0)
elif la1 and la1 in [STR_CONST]:
pass
str_tok = self.LT(1)
self.match(STR_CONST)
self.rpn += struct.pack("B", ptgStr) + upack1(str_tok.text[1:-1].replace("\"\"", "\""))
elif la1 and la1 in [NUM_CONST]:
pass
num_tok = self.LT(1)
self.match(NUM_CONST)
self.rpn += struct.pack("<Bd", ptgNum, float(num_tok.text))
elif la1 and la1 in [FUNC_IF]:
pass
self.match(FUNC_IF)
self.match(LP)
self.expr("V")
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<BBH", ptgAttr, 0x02, 0) # tAttrIf
pos0 = len(self.rpn) - 2
self.expr(arg_type)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 0) # tAttrSkip
pos1 = len(self.rpn) - 2
self.rpn = self.rpn[:pos0] + struct.pack("<H", pos1-pos0) + self.rpn[pos0+2:]
self.expr(arg_type)
self.match(RP)
self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 3) # tAttrSkip
self.rpn += struct.pack("<BBH", ptgFuncVarR, 3, 1) # 3 = nargs, 1 = IF func
pos2 = len(self.rpn)
self.rpn = self.rpn[:pos1] + struct.pack("<H", pos2-(pos1+2)-1) + self.rpn[pos1+2:]
elif la1 and la1 in [FUNC_CHOOSE]:
pass
self.match(FUNC_CHOOSE)
arg_type = "R"
rpn_chunks = []
self.match(LP)
self.expr("V")
rpn_start = len(self.rpn)
ref_markers = [len(self.sheet_references)]
while True:
if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
mark = len(self.rpn)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
pass
self.expr(arg_type)
elif la1 and la1 in [RP,COMMA,SEMICOLON]:
pass
self.rpn += struct.pack("B", ptgMissArg)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
rpn_chunks.append(self.rpn[mark:])
ref_markers.append(len(self.sheet_references))
else:
break
self.match(RP)
self.rpn = self.rpn[:rpn_start]
nc = len(rpn_chunks)
chunklens = [len(chunk) for chunk in rpn_chunks]
skiplens = [0] * nc
skiplens[-1] = 3
for ic in xrange(nc-1, 0, -1):
skiplens[ic-1] = skiplens[ic] + chunklens[ic] + 4
jump_pos = [2 * nc + 2]
for ic in xrange(nc):
jump_pos.append(jump_pos[-1] + chunklens[ic] + 4)
chunk_shift = 2 * nc + 6 # size of tAttrChoose
for ic in xrange(nc):
for refx in xrange(ref_markers[ic], ref_markers[ic+1]):
ref = self.sheet_references[refx]
self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift)
chunk_shift += 4 # size of tAttrSkip
choose_rpn = []
choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x04, nc)) # 0x04 is tAttrChoose
choose_rpn.append(struct.pack("<%dH" % (nc+1), *jump_pos))
for ic in xrange(nc):
choose_rpn.append(rpn_chunks[ic])
choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x08, skiplens[ic])) # 0x08 is tAttrSkip
choose_rpn.append(struct.pack("<BBH", ptgFuncVarV, nc+1, 100)) # 100 is CHOOSE fn
self.rpn += "".join(choose_rpn)
elif la1 and la1 in [LP]:
pass
self.match(LP)
self.expr(arg_type)
self.match(RP)
self.rpn += struct.pack("B", ptgParen)
else:
if (self.LA(1)==INT_CONST) and (_tokenSet_0.member(self.LA(2))):
pass
int_tok = self.LT(1)
self.match(INT_CONST)
# print "**int_const", int_tok.text
int_value = int(int_tok.text)
if int_value <= 65535:
self.rpn += struct.pack("<BH", ptgInt, int_value)
else:
self.rpn += struct.pack("<Bd", ptgNum, float(int_value))
elif (self.LA(1)==REF2D) and (_tokenSet_0.member(self.LA(2))):
pass
ref2d_tok = self.LT(1)
self.match(REF2D)
# print "**ref2d %s %s" % (ref2d_tok.text, arg_type)
r, c = Utils.cell_to_packed_rowcol(ref2d_tok.text)
ptg = ptgRefR + _RVAdeltaRef[arg_type]
self.rpn += struct.pack("<B2H", ptg, r, c)
elif (self.LA(1)==REF2D) and (self.LA(2)==COLON):
pass
ref2d1_tok = self.LT(1)
self.match(REF2D)
self.match(COLON)
ref2d2_tok = self.LT(1)
self.match(REF2D)
r1, c1 = Utils.cell_to_packed_rowcol(ref2d1_tok.text)
r2, c2 = Utils.cell_to_packed_rowcol(ref2d2_tok.text)
ptg = ptgAreaR + _RVAdeltaArea[arg_type]
self.rpn += struct.pack("<B4H", ptg, r1, r2, c1, c2)
elif (self.LA(1)==INT_CONST or self.LA(1)==NAME or self.LA(1)==QUOTENAME) and (self.LA(2)==COLON or self.LA(2)==BANG):
pass
sheet1=self.sheet()
sheet2 = sheet1
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [COLON]:
pass
self.match(COLON)
sheet2=self.sheet()
elif la1 and la1 in [BANG]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.match(BANG)
ref3d_ref2d = self.LT(1)
self.match(REF2D)
ptg = ptgRef3dR + _RVAdeltaRef[arg_type]
rpn_ref2d = ""
r1, c1 = Utils.cell_to_packed_rowcol(ref3d_ref2d.text)
rpn_ref2d = struct.pack("<3H", 0x0000, r1, c1)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [COLON]:
pass
self.match(COLON)
ref3d_ref2d2 = self.LT(1)
self.match(REF2D)
ptg = ptgArea3dR + _RVAdeltaArea[arg_type]
r2, c2 = Utils.cell_to_packed_rowcol(ref3d_ref2d2.text)
rpn_ref2d = struct.pack("<5H", 0x0000, r1, r2, c1, c2)
elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,PERCENT,RP,COMMA,SEMICOLON,CONCAT]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<B", ptg)
self.sheet_references.append((sheet1, sheet2, len(self.rpn)))
self.rpn += rpn_ref2d
elif (self.LA(1)==NAME) and (_tokenSet_0.member(self.LA(2))):
pass
name_tok = self.LT(1)
self.match(NAME)
raise Exception("[formula] found unexpected NAME token (%r)" % name_tok.txt)
# #### TODO: handle references to defined names here
elif (self.LA(1)==NAME) and (self.LA(2)==LP):
pass
func_tok = self.LT(1)
self.match(NAME)
func_toku = func_tok.text.upper()
if func_toku in all_funcs_by_name:
(opcode,
min_argc,
max_argc,
func_type,
arg_type_str) = all_funcs_by_name[func_toku]
arg_type_list = list(arg_type_str)
else:
raise Exception("[formula] unknown function (%s)" % func_tok.text)
# print "**func_tok1 %s %s" % (func_toku, func_type)
xcall = opcode < 0
if xcall:
# The name of the add-in function is passed as the 1st arg
# of the hidden XCALL function
self.xcall_references.append((func_toku, len(self.rpn) + 1))
self.rpn += struct.pack("<BHHH",
ptgNameXR,
0xadde, # ##PATCHME## index to REF entry in EXTERNSHEET record
0xefbe, # ##PATCHME## one-based index to EXTERNNAME record
0x0000) # unused
self.match(LP)
arg_count=self.expr_list(arg_type_list, min_argc, max_argc)
self.match(RP)
if arg_count > max_argc or arg_count < min_argc:
raise Exception, "%d parameters for function: %s" % (arg_count, func_tok.text)
if xcall:
func_ptg = ptgFuncVarR + _RVAdelta[func_type]
self.rpn += struct.pack("<2BH", func_ptg, arg_count + 1, 255) # 255 is magic XCALL function
elif min_argc == max_argc:
func_ptg = ptgFuncR + _RVAdelta[func_type]
self.rpn += struct.pack("<BH", func_ptg, opcode)
elif arg_count == 1 and func_tok.text.upper() == "SUM":
self.rpn += struct.pack("<BBH", ptgAttr, 0x10, 0) # tAttrSum
else:
func_ptg = ptgFuncVarR + _RVAdelta[func_type]
self.rpn += struct.pack("<2BH", func_ptg, arg_count, opcode)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
    def sheet(self):
        """Parse one sheet reference (NAME, INT_CONST or QUOTENAME) and
        return its text; a QUOTENAME is unquoted and doubled quotes are
        collapsed.
        """
        ref = None
        sheet_ref_name = None
        sheet_ref_int = None
        sheet_ref_quote = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [NAME]:
            pass
            sheet_ref_name = self.LT(1)
            self.match(NAME)
            ref = sheet_ref_name.text
        elif la1 and la1 in [INT_CONST]:
            pass
            sheet_ref_int = self.LT(1)
            self.match(INT_CONST)
            ref = sheet_ref_int.text
        elif la1 and la1 in [QUOTENAME]:
            pass
            sheet_ref_quote = self.LT(1)
            self.match(QUOTENAME)
            # strip the surrounding quotes and un-double embedded ones
            ref = sheet_ref_quote.text[1:-1].replace("''", "'")
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return ref
    def expr_list(self,
        arg_type_list, min_argc, max_argc
    ):
        """Parse a function's argument list; return the argument count.

        arg_type_list gives the required class per position; a "+" entry
        means "repeat the previous entry" for all further arguments.
        Empty argument slots (e.g. F(a,,b)) emit ptgMissArg.
        """
        arg_cnt = None
        arg_cnt = 0
        arg_type = arg_type_list[arg_cnt]
        # print "**expr_list1[%d] req=%s" % (arg_cnt, arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
            pass
            self.expr(arg_type)
            arg_cnt += 1
            while True:
                if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
                    pass
                    # pick the required class for this position, clamping to
                    # the last (or repeated) entry of arg_type_list
                    if arg_cnt < len(arg_type_list):
                        arg_type = arg_type_list[arg_cnt]
                    else:
                        arg_type = arg_type_list[-1]
                    if arg_type == "+":
                        arg_type = arg_type_list[-2]
                    # print "**expr_list2[%d] req=%s" % (arg_cnt, arg_type)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP,COMMA,SEMICOLON]:
                        pass
                        # empty argument slot
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    arg_cnt += 1
                else:
                    break
        elif la1 and la1 in [RP]:
            # empty argument list
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return arg_cnt
# Human-readable names for the token-type codes, indexed by code; assigned
# to self.tokenNames in Parser.__init__ (presumably consumed by the antlr
# runtime when formatting parse-error messages).
_tokenNames = [
    "<0>",
    "EOF",
    "<2>",
    "NULL_TREE_LOOKAHEAD",
    "TRUE_CONST",
    "FALSE_CONST",
    "STR_CONST",
    "NUM_CONST",
    "INT_CONST",
    "FUNC_IF",
    "FUNC_CHOOSE",
    "NAME",
    "QUOTENAME",
    "EQ",
    "NE",
    "GT",
    "LT",
    "GE",
    "LE",
    "ADD",
    "SUB",
    "MUL",
    "DIV",
    "POWER",
    "PERCENT",
    "LP",
    "RP",
    "LB",
    "RB",
    "COLON",
    "COMMA",
    "SEMICOLON",
    "REF2D",
    "REF2D_R1C1",
    "BANG",
    "CONCAT"
]
### generate bit set
def mk_tokenSet_0():
    """Return the generator-produced bit words for the set of token types
    that may follow a completed operand (consulted via
    _tokenSet_0.member(self.LA(2)) in primary() for LL(2) disambiguation).
    """
    ### var1
    data = [ 37681618946L, 0L]
    return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
| {
"repo_name": "chirilo/remo",
"path": "vendor-local/lib/python/tablib/packages/xlwt/ExcelFormulaParser.py",
"copies": "69",
"size": "22362",
"license": "bsd-3-clause",
"hash": 8553294643542764000,
"line_mean": 32.0310192024,
"line_max": 143,
"alpha_frac": 0.4545657812,
"autogenerated": false,
"ratio": 3.5888300433317286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
### $ANTLR 2.7.7 (20060930): "xlwt/excel-formula.g" -> "ExcelFormulaParser.py"$
### import antlr and other modules ..
import sys
import antlr
version = sys.version.split()[0]
# Compatibility shim for ancient interpreters that predate the built-in
# booleans.  NOTE(review): lexicographic string comparison of version
# numbers -- adequate for the 2.x range targeted here, but not correct in
# general (e.g. '2.10' < '2.3' lexicographically).
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False
### header action >>>
import struct
import Utils
from UnicodeUtils import upack1
from ExcelMagic import *
# Opcode deltas added to a base ptg (e.g. ptgRefR, ptgAreaR) to select the
# variant matching the required argument class: "R"eference, "V"alue,
# "A"rray.  NOTE(review): the "D" entry maps differently in the Ref and
# Area tables -- presumably a special argument class; confirm vs ExcelMagic.
_RVAdelta = {"R": 0, "V": 0x20, "A": 0x40}
_RVAdeltaRef = {"R": 0, "V": 0x20, "A": 0x40, "D": 0x20}
_RVAdeltaArea = {"R": 0, "V": 0x20, "A": 0x40, "D": 0}
class FormulaParseException(Exception):
    """
    An exception indicating that a Formula could not be successfully parsed.

    NOTE(review): not raised anywhere in this module (the parser raises
    antlr exceptions and plain Exception); presumably raised by the code
    that wraps this parser.
    """
### header action <<<
### preamble action>>>
### preamble action <<<
### import antlr.Token
### >>>The Known Token Types <<<
# Re-exported antlr runtime constants, followed by the generated token-type
# codes; the numeric values index into the parser's _tokenNames table.
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
TRUE_CONST = 4
FALSE_CONST = 5
STR_CONST = 6
NUM_CONST = 7
INT_CONST = 8
FUNC_IF = 9
FUNC_CHOOSE = 10
NAME = 11
QUOTENAME = 12
EQ = 13
NE = 14
GT = 15
LT = 16
GE = 17
LE = 18
ADD = 19
SUB = 20
MUL = 21
DIV = 22
POWER = 23
PERCENT = 24
LP = 25
RP = 26
LB = 27
RB = 28
COLON = 29
COMMA = 30
SEMICOLON = 31
REF2D = 32
REF2D_R1C1 = 33
BANG = 34
CONCAT = 35
class Parser(antlr.LLkParser):
### user action >>>
### user action <<<
def __init__(self, *args, **kwargs):
antlr.LLkParser.__init__(self, *args, **kwargs)
self.tokenNames = _tokenNames
### __init__ header action >>>
self.rpn = ""
self.sheet_references = []
self.xcall_references = []
### __init__ header action <<<
def formula(self):
pass
self.expr("V")
def expr(self,
arg_type
):
pass
self.prec0_expr(arg_type)
while True:
if ((self.LA(1) >= EQ and self.LA(1) <= LE)):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [EQ]:
pass
self.match(EQ)
op = struct.pack('B', ptgEQ)
elif la1 and la1 in [NE]:
pass
self.match(NE)
op = struct.pack('B', ptgNE)
elif la1 and la1 in [GT]:
pass
self.match(GT)
op = struct.pack('B', ptgGT)
elif la1 and la1 in [LT]:
pass
self.match(LT)
op = struct.pack('B', ptgLT)
elif la1 and la1 in [GE]:
pass
self.match(GE)
op = struct.pack('B', ptgGE)
elif la1 and la1 in [LE]:
pass
self.match(LE)
op = struct.pack('B', ptgLE)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.prec0_expr(arg_type)
self.rpn += op
else:
break
def prec0_expr(self,
arg_type
):
pass
self.prec1_expr(arg_type)
while True:
if (self.LA(1) == CONCAT):
pass
pass
self.match(CONCAT)
op = struct.pack('B', ptgConcat)
self.prec1_expr(arg_type)
self.rpn += op
else:
break
def prec1_expr(self,
arg_type
):
pass
self.prec2_expr(arg_type)
while True:
if (self.LA(1) == ADD or self.LA(1) == SUB):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [ADD]:
pass
self.match(ADD)
op = struct.pack('B', ptgAdd)
elif la1 and la1 in [SUB]:
pass
self.match(SUB)
op = struct.pack('B', ptgSub)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.prec2_expr(arg_type)
self.rpn += op;
# print "**prec1_expr4 %s" % arg_type
else:
break
def prec2_expr(self,
arg_type
):
pass
self.prec3_expr(arg_type)
while True:
if (self.LA(1) == MUL or self.LA(1) == DIV):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [MUL]:
pass
self.match(MUL)
op = struct.pack('B', ptgMul)
elif la1 and la1 in [DIV]:
pass
self.match(DIV)
op = struct.pack('B', ptgDiv)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.prec3_expr(arg_type)
self.rpn += op
else:
break
def prec3_expr(self,
arg_type
):
pass
self.prec4_expr(arg_type)
while True:
if (self.LA(1) == POWER):
pass
pass
self.match(POWER)
op = struct.pack('B', ptgPower)
self.prec4_expr(arg_type)
self.rpn += op
else:
break
def prec4_expr(self,
arg_type
):
pass
self.prec5_expr(arg_type)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [PERCENT]:
pass
self.match(PERCENT)
self.rpn += struct.pack('B', ptgPercent)
elif la1 and la1 in [EOF, EQ, NE, GT, LT, GE, LE, ADD, SUB, MUL, DIV, POWER, RP, COMMA, SEMICOLON, CONCAT]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def prec5_expr(self,
arg_type
):
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST, FALSE_CONST, STR_CONST, NUM_CONST, INT_CONST, FUNC_IF, FUNC_CHOOSE, NAME,
QUOTENAME, LP, REF2D]:
pass
self.primary(arg_type)
elif la1 and la1 in [SUB]:
pass
self.match(SUB)
self.primary(arg_type)
self.rpn += struct.pack('B', ptgUminus)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def primary(self,
arg_type
):
str_tok = None
int_tok = None
num_tok = None
ref2d_tok = None
ref2d1_tok = None
ref2d2_tok = None
ref3d_ref2d = None
ref3d_ref2d2 = None
name_tok = None
func_tok = None
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST]:
pass
self.match(TRUE_CONST)
self.rpn += struct.pack("2B", ptgBool, 1)
elif la1 and la1 in [FALSE_CONST]:
pass
self.match(FALSE_CONST)
self.rpn += struct.pack("2B", ptgBool, 0)
elif la1 and la1 in [STR_CONST]:
pass
str_tok = self.LT(1)
self.match(STR_CONST)
self.rpn += struct.pack("B", ptgStr) + upack1(str_tok.text[1:-1].replace("\"\"", "\""))
elif la1 and la1 in [NUM_CONST]:
pass
num_tok = self.LT(1)
self.match(NUM_CONST)
self.rpn += struct.pack("<Bd", ptgNum, float(num_tok.text))
elif la1 and la1 in [FUNC_IF]:
pass
self.match(FUNC_IF)
self.match(LP)
self.expr("V")
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<BBH", ptgAttr, 0x02, 0) # tAttrIf
pos0 = len(self.rpn) - 2
self.expr(arg_type)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 0) # tAttrSkip
pos1 = len(self.rpn) - 2
self.rpn = self.rpn[:pos0] + struct.pack("<H", pos1 - pos0) + self.rpn[pos0 + 2:]
self.expr(arg_type)
self.match(RP)
self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 3) # tAttrSkip
self.rpn += struct.pack("<BBH", ptgFuncVarR, 3, 1) # 3 = nargs, 1 = IF func
pos2 = len(self.rpn)
self.rpn = self.rpn[:pos1] + struct.pack("<H", pos2 - (pos1 + 2) - 1) + self.rpn[pos1 + 2:]
elif la1 and la1 in [FUNC_CHOOSE]:
pass
self.match(FUNC_CHOOSE)
arg_type = "R"
rpn_chunks = []
self.match(LP)
self.expr("V")
rpn_start = len(self.rpn)
ref_markers = [len(self.sheet_references)]
while True:
if (self.LA(1) == COMMA or self.LA(1) == SEMICOLON):
pass
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [SEMICOLON]:
pass
self.match(SEMICOLON)
elif la1 and la1 in [COMMA]:
pass
self.match(COMMA)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
mark = len(self.rpn)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [TRUE_CONST, FALSE_CONST, STR_CONST, NUM_CONST, INT_CONST, FUNC_IF, FUNC_CHOOSE,
NAME, QUOTENAME, SUB, LP, REF2D]:
pass
self.expr(arg_type)
elif la1 and la1 in [RP, COMMA, SEMICOLON]:
pass
self.rpn += struct.pack("B", ptgMissArg)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
rpn_chunks.append(self.rpn[mark:])
ref_markers.append(len(self.sheet_references))
else:
break
self.match(RP)
self.rpn = self.rpn[:rpn_start]
nc = len(rpn_chunks)
chunklens = [len(chunk) for chunk in rpn_chunks]
skiplens = [0] * nc
skiplens[-1] = 3
for ic in xrange(nc - 1, 0, -1):
skiplens[ic - 1] = skiplens[ic] + chunklens[ic] + 4
jump_pos = [2 * nc + 2]
for ic in xrange(nc):
jump_pos.append(jump_pos[-1] + chunklens[ic] + 4)
chunk_shift = 2 * nc + 6 # size of tAttrChoose
for ic in xrange(nc):
for refx in xrange(ref_markers[ic], ref_markers[ic + 1]):
ref = self.sheet_references[refx]
self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift)
chunk_shift += 4 # size of tAttrSkip
choose_rpn = []
choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x04, nc)) # 0x04 is tAttrChoose
choose_rpn.append(struct.pack("<%dH" % (nc + 1), *jump_pos))
for ic in xrange(nc):
choose_rpn.append(rpn_chunks[ic])
choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x08, skiplens[ic])) # 0x08 is tAttrSkip
choose_rpn.append(struct.pack("<BBH", ptgFuncVarV, nc + 1, 100)) # 100 is CHOOSE fn
self.rpn += "".join(choose_rpn)
elif la1 and la1 in [LP]:
pass
self.match(LP)
self.expr(arg_type)
self.match(RP)
self.rpn += struct.pack("B", ptgParen)
else:
if (self.LA(1) == INT_CONST) and (_tokenSet_0.member(self.LA(2))):
pass
int_tok = self.LT(1)
self.match(INT_CONST)
# print "**int_const", int_tok.text
int_value = int(int_tok.text)
if int_value <= 65535:
self.rpn += struct.pack("<BH", ptgInt, int_value)
else:
self.rpn += struct.pack("<Bd", ptgNum, float(int_value))
elif (self.LA(1) == REF2D) and (_tokenSet_0.member(self.LA(2))):
pass
ref2d_tok = self.LT(1)
self.match(REF2D)
# print "**ref2d %s %s" % (ref2d_tok.text, arg_type)
r, c = Utils.cell_to_packed_rowcol(ref2d_tok.text)
ptg = ptgRefR + _RVAdeltaRef[arg_type]
self.rpn += struct.pack("<B2H", ptg, r, c)
elif (self.LA(1) == REF2D) and (self.LA(2) == COLON):
pass
ref2d1_tok = self.LT(1)
self.match(REF2D)
self.match(COLON)
ref2d2_tok = self.LT(1)
self.match(REF2D)
r1, c1 = Utils.cell_to_packed_rowcol(ref2d1_tok.text)
r2, c2 = Utils.cell_to_packed_rowcol(ref2d2_tok.text)
ptg = ptgAreaR + _RVAdeltaArea[arg_type]
self.rpn += struct.pack("<B4H", ptg, r1, r2, c1, c2)
elif (self.LA(1) == INT_CONST or self.LA(1) == NAME or self.LA(1) == QUOTENAME) and (
self.LA(2) == COLON or self.LA(2) == BANG):
pass
sheet1 = self.sheet()
sheet2 = sheet1
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [COLON]:
pass
self.match(COLON)
sheet2 = self.sheet()
elif la1 and la1 in [BANG]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.match(BANG)
ref3d_ref2d = self.LT(1)
self.match(REF2D)
ptg = ptgRef3dR + _RVAdeltaRef[arg_type]
rpn_ref2d = ""
r1, c1 = Utils.cell_to_packed_rowcol(ref3d_ref2d.text)
rpn_ref2d = struct.pack("<3H", 0x0000, r1, c1)
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [COLON]:
pass
self.match(COLON)
ref3d_ref2d2 = self.LT(1)
self.match(REF2D)
ptg = ptgArea3dR + _RVAdeltaArea[arg_type]
r2, c2 = Utils.cell_to_packed_rowcol(ref3d_ref2d2.text)
rpn_ref2d = struct.pack("<5H", 0x0000, r1, r2, c1, c2)
elif la1 and la1 in [EOF, EQ, NE, GT, LT, GE, LE, ADD, SUB, MUL, DIV, POWER, PERCENT, RP, COMMA,
SEMICOLON, CONCAT]:
pass
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.rpn += struct.pack("<B", ptg)
self.sheet_references.append((sheet1, sheet2, len(self.rpn)))
self.rpn += rpn_ref2d
elif (self.LA(1) == NAME) and (_tokenSet_0.member(self.LA(2))):
pass
name_tok = self.LT(1)
self.match(NAME)
raise Exception("[formula] found unexpected NAME token (%r)" % name_tok.txt)
# #### TODO: handle references to defined names here
elif (self.LA(1) == NAME) and (self.LA(2) == LP):
pass
func_tok = self.LT(1)
self.match(NAME)
func_toku = func_tok.text.upper()
if func_toku in all_funcs_by_name:
(opcode,
min_argc,
max_argc,
func_type,
arg_type_str) = all_funcs_by_name[func_toku]
arg_type_list = list(arg_type_str)
else:
raise Exception("[formula] unknown function (%s)" % func_tok.text)
# print "**func_tok1 %s %s" % (func_toku, func_type)
xcall = opcode < 0
if xcall:
# The name of the add-in function is passed as the 1st arg
# of the hidden XCALL function
self.xcall_references.append((func_toku, len(self.rpn) + 1))
self.rpn += struct.pack("<BHHH",
ptgNameXR,
0xadde, # ##PATCHME## index to REF entry in EXTERNSHEET record
0xefbe, # ##PATCHME## one-based index to EXTERNNAME record
0x0000) # unused
self.match(LP)
arg_count = self.expr_list(arg_type_list, min_argc, max_argc)
self.match(RP)
if arg_count > max_argc or arg_count < min_argc:
raise Exception, "%d parameters for function: %s" % (arg_count, func_tok.text)
if xcall:
func_ptg = ptgFuncVarR + _RVAdelta[func_type]
self.rpn += struct.pack("<2BH", func_ptg, arg_count + 1, 255) # 255 is magic XCALL function
elif min_argc == max_argc:
func_ptg = ptgFuncR + _RVAdelta[func_type]
self.rpn += struct.pack("<BH", func_ptg, opcode)
elif arg_count == 1 and func_tok.text.upper() == "SUM":
self.rpn += struct.pack("<BBH", ptgAttr, 0x10, 0) # tAttrSum
else:
func_ptg = ptgFuncVarR + _RVAdelta[func_type]
self.rpn += struct.pack("<2BH", func_ptg, arg_count, opcode)
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
def sheet(self):
ref = None
sheet_ref_name = None
sheet_ref_int = None
sheet_ref_quote = None
la1 = self.LA(1)
if False:
pass
elif la1 and la1 in [NAME]:
pass
sheet_ref_name = self.LT(1)
self.match(NAME)
ref = sheet_ref_name.text
elif la1 and la1 in [INT_CONST]:
pass
sheet_ref_int = self.LT(1)
self.match(INT_CONST)
ref = sheet_ref_int.text
elif la1 and la1 in [QUOTENAME]:
pass
sheet_ref_quote = self.LT(1)
self.match(QUOTENAME)
ref = sheet_ref_quote.text[1:-1].replace("''", "'")
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
return ref
    def expr_list(self,
        arg_type_list, min_argc, max_argc
    ):
        # Parse a comma/semicolon-separated function argument list.
        # arg_type_list spells the required R/V/A class per argument; a
        # trailing "+" entry means "repeat the previous spec".  Missing
        # arguments emit ptgMissArg.  Returns the argument count; the
        # caller validates it against min_argc/max_argc.
        arg_cnt = None
        arg_cnt = 0
        arg_type = arg_type_list[arg_cnt]
        # print "**expr_list1[%d] req=%s" % (arg_cnt, arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST, FALSE_CONST, STR_CONST, NUM_CONST, INT_CONST, FUNC_IF, FUNC_CHOOSE, NAME,
                             QUOTENAME, SUB, LP, REF2D]:
            pass
            self.expr(arg_type)
            arg_cnt += 1
            while True:
                if (self.LA(1) == COMMA or self.LA(1) == SEMICOLON):
                    pass
                    if arg_cnt < len(arg_type_list):
                        arg_type = arg_type_list[arg_cnt]
                    else:
                        arg_type = arg_type_list[-1]
                    if arg_type == "+":
                        arg_type = arg_type_list[-2]
                    # print "**expr_list2[%d] req=%s" % (arg_cnt, arg_type)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST, FALSE_CONST, STR_CONST, NUM_CONST, INT_CONST, FUNC_IF, FUNC_CHOOSE,
                                         NAME, QUOTENAME, SUB, LP, REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP, COMMA, SEMICOLON]:
                        pass
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    arg_cnt += 1
                else:
                    break
        elif la1 and la1 in [RP]:
            # Empty argument list.
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return arg_cnt
# Maps token-type codes (the numeric constants defined at module level) to
# display names; indexed by antlr for diagnostics, so order must match the
# constant values.
_tokenNames = [
    "<0>",
    "EOF",
    "<2>",
    "NULL_TREE_LOOKAHEAD",
    "TRUE_CONST",
    "FALSE_CONST",
    "STR_CONST",
    "NUM_CONST",
    "INT_CONST",
    "FUNC_IF",
    "FUNC_CHOOSE",
    "NAME",
    "QUOTENAME",
    "EQ",
    "NE",
    "GT",
    "LT",
    "GE",
    "LE",
    "ADD",
    "SUB",
    "MUL",
    "DIV",
    "POWER",
    "PERCENT",
    "LP",
    "RP",
    "LB",
    "RB",
    "COLON",
    "COMMA",
    "SEMICOLON",
    "REF2D",
    "REF2D_R1C1",
    "BANG",
    "CONCAT"
]
### generate bit set
def mk_tokenSet_0():
    # Raw 64-bit chunks for the bitset of token types that may legally
    # follow an operand (used by primary()'s two-token lookahead tests).
    ### var1
    data = [37681618946L, 0L]
    return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
| {
"repo_name": "rhefner1/ghidonations",
"path": "xlwt/ExcelFormulaParser.py",
"copies": "1",
"size": "22904",
"license": "apache-2.0",
"hash": 1802339211981135000,
"line_mean": 32.6823529412,
"line_max": 120,
"alpha_frac": 0.4429357317,
"autogenerated": false,
"ratio": 3.6488768519993626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.958305206742496,
"avg_score": 0.0017521032548803658,
"num_lines": 680
} |
### $ANTLR 2.7.7 (20060930): "xlwt/excel-formula.g" -> "ExcelFormulaParser.py"$
### import antlr and other modules ..
import sys
from . import antlr
### header action >>>
import struct
from . import Utils
from .UnicodeUtils import upack1
from .ExcelMagic import *
# Offsets added to a base ptg opcode to select its Reference / Value / Array
# class variant, per the BIFF8 formula-token encoding.  The "D" entries
# cover the variant used for 3-D/deleted references.
_RVAdelta = {"R": 0, "V": 0x20, "A": 0x40}
_RVAdeltaRef = {"R": 0, "V": 0x20, "A": 0x40, "D": 0x20}
_RVAdeltaArea = {"R": 0, "V": 0x20, "A": 0x40, "D": 0}
class FormulaParseException(Exception):
    """
    An exception indicating that a Formula could not be successfully parsed.
    """
    # NOTE(review): no raiser is visible in this chunk — presumably used by
    # the public formula-compiling entry point; confirm against the caller.
### header action <<<
### preamble action>>>
### preamble action <<<
### import antlr.Token
from .antlr import Token
### >>>The Known Token Types <<<
# Token-type codes assigned by the ANTLR grammar (excel-formula.g).
# They index into _tokenNames below, so the two must stay in sync.
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
TRUE_CONST = 4
FALSE_CONST = 5
STR_CONST = 6
NUM_CONST = 7
INT_CONST = 8
FUNC_IF = 9
FUNC_CHOOSE = 10
NAME = 11
QUOTENAME = 12
EQ = 13
NE = 14
GT = 15
LT = 16
GE = 17
LE = 18
ADD = 19
SUB = 20
MUL = 21
DIV = 22
POWER = 23
PERCENT = 24
LP = 25
RP = 26
LB = 27
RB = 28
COLON = 29
COMMA = 30
SEMICOLON = 31
REF2D = 32
REF2D_R1C1 = 33
BANG = 34
CONCAT = 35
class Parser(antlr.LLkParser):
    """ANTLR-generated recursive-descent parser for Excel worksheet formulas.

    Each rule appends BIFF8 RPN token bytes to self.rpn.  The
    sheet_references / xcall_references lists record byte offsets of
    ##PATCHME## placeholders that the caller must patch against the
    workbook's EXTERNSHEET / EXTERNNAME records.
    """
    ### user action >>>
    ### user action <<<
    def __init__(self, *args, **kwargs):
        antlr.LLkParser.__init__(self, *args, **kwargs)
        self.tokenNames = _tokenNames
        ### __init__ header action >>>
        self.rpn = b""
        self.sheet_references = []
        self.xcall_references = []
        ### __init__ header action <<<
    def formula(self):
        # Entry point: a formula is a whole expression in Value context.
        self.expr("V")
    def expr(self, arg_type):
        # Lowest precedence: comparison operators (= <> > < >= <=).
        self.prec0_expr(arg_type)
        while True:
            if ((self.LA(1) >= EQ and self.LA(1) <= LE)):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [EQ]:
                    pass
                    self.match(EQ)
                    op = struct.pack('B', ptgEQ)
                elif la1 and la1 in [NE]:
                    pass
                    self.match(NE)
                    op = struct.pack('B', ptgNE)
                elif la1 and la1 in [GT]:
                    pass
                    self.match(GT)
                    op = struct.pack('B', ptgGT)
                elif la1 and la1 in [LT]:
                    pass
                    self.match(LT)
                    op = struct.pack('B', ptgLT)
                elif la1 and la1 in [GE]:
                    pass
                    self.match(GE)
                    op = struct.pack('B', ptgGE)
                elif la1 and la1 in [LE]:
                    pass
                    self.match(LE)
                    op = struct.pack('B', ptgLE)
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec0_expr(arg_type)
                self.rpn += op
            else:
                break
    def prec0_expr(self,
        arg_type
    ):
        # String concatenation (&) -> ptgConcat.
        pass
        self.prec1_expr(arg_type)
        while True:
            if (self.LA(1)==CONCAT):
                pass
                pass
                self.match(CONCAT)
                op = struct.pack('B', ptgConcat)
                self.prec1_expr(arg_type)
                self.rpn += op
            else:
                break
    def prec1_expr(self,
        arg_type
    ):
        # Additive + / - -> ptgAdd / ptgSub.
        pass
        self.prec2_expr(arg_type)
        while True:
            if (self.LA(1)==ADD or self.LA(1)==SUB):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [ADD]:
                    pass
                    self.match(ADD)
                    op = struct.pack('B', ptgAdd)
                elif la1 and la1 in [SUB]:
                    pass
                    self.match(SUB)
                    op = struct.pack('B', ptgSub)
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec2_expr(arg_type)
                self.rpn += op;
                # print "**prec1_expr4 %s" % arg_type
            else:
                break
    def prec2_expr(self,
        arg_type
    ):
        # Multiplicative * / / -> ptgMul / ptgDiv.
        pass
        self.prec3_expr(arg_type)
        while True:
            if (self.LA(1)==MUL or self.LA(1)==DIV):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [MUL]:
                    pass
                    self.match(MUL)
                    op = struct.pack('B', ptgMul)
                elif la1 and la1 in [DIV]:
                    pass
                    self.match(DIV)
                    op = struct.pack('B', ptgDiv)
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec3_expr(arg_type)
                self.rpn += op
            else:
                break
    def prec3_expr(self,
        arg_type
    ):
        # Exponentiation ^ -> ptgPower.
        pass
        self.prec4_expr(arg_type)
        while True:
            if (self.LA(1)==POWER):
                pass
                pass
                self.match(POWER)
                op = struct.pack('B', ptgPower)
                self.prec4_expr(arg_type)
                self.rpn += op
            else:
                break
    def prec4_expr(self,
        arg_type
    ):
        # Optional postfix % -> ptgPercent.
        pass
        self.prec5_expr(arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [PERCENT]:
            pass
            self.match(PERCENT)
            self.rpn += struct.pack('B', ptgPercent)
        elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,RP,COMMA,SEMICOLON,CONCAT]:
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
    def prec5_expr(self,
        arg_type
    ):
        # Unary minus -> ptgUminus, or a primary operand.
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,LP,REF2D]:
            pass
            self.primary(arg_type)
        elif la1 and la1 in [SUB]:
            pass
            self.match(SUB)
            self.primary(arg_type)
            self.rpn += struct.pack('B', ptgUminus)
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
    def primary(self,
        arg_type
    ):
        # Parse a single operand: constants, IF/CHOOSE, parenthesised
        # sub-expression, 2-D/3-D references, or a function call.
        str_tok = None
        int_tok = None
        num_tok = None
        ref2d_tok = None
        ref2d1_tok = None
        ref2d2_tok = None
        ref3d_ref2d = None
        ref3d_ref2d2 = None
        name_tok = None
        func_tok = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST]:
            pass
            self.match(TRUE_CONST)
            self.rpn += struct.pack("2B", ptgBool, 1)
        elif la1 and la1 in [FALSE_CONST]:
            pass
            self.match(FALSE_CONST)
            self.rpn += struct.pack("2B", ptgBool, 0)
        elif la1 and la1 in [STR_CONST]:
            pass
            str_tok = self.LT(1)
            self.match(STR_CONST)
            # Strip surrounding quotes, un-double embedded "" escapes.
            self.rpn += struct.pack("B", ptgStr) + upack1(str_tok.text[1:-1].replace("\"\"", "\""))
        elif la1 and la1 in [NUM_CONST]:
            pass
            num_tok = self.LT(1)
            self.match(NUM_CONST)
            self.rpn += struct.pack("<Bd", ptgNum, float(num_tok.text))
        elif la1 and la1 in [FUNC_IF]:
            pass
            self.match(FUNC_IF)
            self.match(LP)
            self.expr("V")
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in [SEMICOLON]:
                pass
                self.match(SEMICOLON)
            elif la1 and la1 in [COMMA]:
                pass
                self.match(COMMA)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
            self.rpn += struct.pack("<BBH", ptgAttr, 0x02, 0) # tAttrIf
            pos0 = len(self.rpn) - 2
            self.expr(arg_type)
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in [SEMICOLON]:
                pass
                self.match(SEMICOLON)
            elif la1 and la1 in [COMMA]:
                pass
                self.match(COMMA)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
            self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 0) # tAttrSkip
            pos1 = len(self.rpn) - 2
            # Back-patch the tAttrIf jump over the "true" branch.
            self.rpn = self.rpn[:pos0] + struct.pack("<H", pos1-pos0) + self.rpn[pos0+2:]
            self.expr(arg_type)
            self.match(RP)
            self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 3) # tAttrSkip
            self.rpn += struct.pack("<BBH", ptgFuncVarR, 3, 1) # 3 = nargs, 1 = IF func
            pos2 = len(self.rpn)
            # Back-patch the tAttrSkip over the "false" branch.
            self.rpn = self.rpn[:pos1] + struct.pack("<H", pos2-(pos1+2)-1) + self.rpn[pos1+2:]
        elif la1 and la1 in [FUNC_CHOOSE]:
            pass
            self.match(FUNC_CHOOSE)
            # BUGFIX: was b"R".  The _RVAdelta* lookup tables are keyed by
            # str, so a bytes key raised KeyError for any CHOOSE argument
            # containing a cell/area reference.
            arg_type = "R"
            rpn_chunks = []
            self.match(LP)
            self.expr("V")
            rpn_start = len(self.rpn)
            ref_markers = [len(self.sheet_references)]
            while True:
                if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
                    pass
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    mark = len(self.rpn)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP,COMMA,SEMICOLON]:
                        pass
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    rpn_chunks.append(self.rpn[mark:])
                    ref_markers.append(len(self.sheet_references))
                else:
                    break
            self.match(RP)
            # Re-assemble the per-argument chunks with a tAttrChoose jump
            # table and per-chunk tAttrSkip records.
            self.rpn = self.rpn[:rpn_start]
            nc = len(rpn_chunks)
            chunklens = [len(chunk) for chunk in rpn_chunks]
            skiplens = [0] * nc
            skiplens[-1] = 3
            for ic in range(nc-1, 0, -1):
                skiplens[ic-1] = skiplens[ic] + chunklens[ic] + 4
            jump_pos = [2 * nc + 2]
            for ic in range(nc):
                jump_pos.append(jump_pos[-1] + chunklens[ic] + 4)
            chunk_shift = 2 * nc + 6 # size of tAttrChoose
            for ic in range(nc):
                # Shift recorded 3-D reference offsets to the chunks' new
                # positions inside the reassembled stream.
                for refx in range(ref_markers[ic], ref_markers[ic+1]):
                    ref = self.sheet_references[refx]
                    self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift)
                chunk_shift += 4 # size of tAttrSkip
            choose_rpn = []
            choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x04, nc)) # 0x04 is tAttrChoose
            choose_rpn.append(struct.pack("<%dH" % (nc+1), *jump_pos))
            for ic in range(nc):
                choose_rpn.append(rpn_chunks[ic])
                choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x08, skiplens[ic])) # 0x08 is tAttrSkip
            choose_rpn.append(struct.pack("<BBH", ptgFuncVarV, nc+1, 100)) # 100 is CHOOSE fn
            self.rpn += b"".join(choose_rpn)
        elif la1 and la1 in [LP]:
            pass
            self.match(LP)
            self.expr(arg_type)
            self.match(RP)
            self.rpn += struct.pack("B", ptgParen)
        else:
            if (self.LA(1)==INT_CONST) and (_tokenSet_0.member(self.LA(2))):
                pass
                int_tok = self.LT(1)
                self.match(INT_CONST)
                # print "**int_const", int_tok.text
                int_value = int(int_tok.text)
                # ptgInt only holds an unsigned 16-bit value; larger
                # integers fall back to the 8-byte float encoding.
                if int_value <= 65535:
                    self.rpn += struct.pack("<BH", ptgInt, int_value)
                else:
                    self.rpn += struct.pack("<Bd", ptgNum, float(int_value))
            elif (self.LA(1)==REF2D) and (_tokenSet_0.member(self.LA(2))):
                pass
                ref2d_tok = self.LT(1)
                self.match(REF2D)
                # print "**ref2d %s %s" % (ref2d_tok.text, arg_type)
                r, c = Utils.cell_to_packed_rowcol(ref2d_tok.text)
                ptg = ptgRefR + _RVAdeltaRef[arg_type]
                self.rpn += struct.pack("<B2H", ptg, r, c)
            elif (self.LA(1)==REF2D) and (self.LA(2)==COLON):
                pass
                ref2d1_tok = self.LT(1)
                self.match(REF2D)
                self.match(COLON)
                ref2d2_tok = self.LT(1)
                self.match(REF2D)
                r1, c1 = Utils.cell_to_packed_rowcol(ref2d1_tok.text)
                r2, c2 = Utils.cell_to_packed_rowcol(ref2d2_tok.text)
                ptg = ptgAreaR + _RVAdeltaArea[arg_type]
                self.rpn += struct.pack("<B4H", ptg, r1, r2, c1, c2)
            elif (self.LA(1)==INT_CONST or self.LA(1)==NAME or self.LA(1)==QUOTENAME) and (self.LA(2)==COLON or self.LA(2)==BANG):
                pass
                # 3-D reference: Sheet1[:Sheet2]!A1[:B2]
                sheet1=self.sheet()
                sheet2 = sheet1
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [COLON]:
                    pass
                    self.match(COLON)
                    sheet2=self.sheet()
                elif la1 and la1 in [BANG]:
                    pass
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.match(BANG)
                ref3d_ref2d = self.LT(1)
                self.match(REF2D)
                ptg = ptgRef3dR + _RVAdeltaRef[arg_type]
                rpn_ref2d = b""
                r1, c1 = Utils.cell_to_packed_rowcol(ref3d_ref2d.text)
                rpn_ref2d = struct.pack("<3H", 0x0000, r1, c1)
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [COLON]:
                    pass
                    self.match(COLON)
                    ref3d_ref2d2 = self.LT(1)
                    self.match(REF2D)
                    ptg = ptgArea3dR + _RVAdeltaArea[arg_type]
                    r2, c2 = Utils.cell_to_packed_rowcol(ref3d_ref2d2.text)
                    rpn_ref2d = struct.pack("<5H", 0x0000, r1, r2, c1, c2)
                elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,PERCENT,RP,COMMA,SEMICOLON,CONCAT]:
                    pass
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.rpn += struct.pack("<B", ptg)
                # Record where the sheet-index placeholder sits so the caller
                # can patch it against the EXTERNSHEET record.
                self.sheet_references.append((sheet1, sheet2, len(self.rpn)))
                self.rpn += rpn_ref2d
            elif (self.LA(1)==NAME) and (_tokenSet_0.member(self.LA(2))):
                name_tok = self.LT(1)
                self.match(NAME)
                # BUGFIX: was name_tok.txt — antlr tokens expose .text (used
                # throughout this module), so the intended diagnostic itself
                # raised AttributeError.
                raise Exception("[formula] found unexpected NAME token (%r)" % name_tok.text)
                # #### TODO: handle references to defined names here
            elif (self.LA(1)==NAME) and (self.LA(2)==LP):
                func_tok = self.LT(1)
                self.match(NAME)
                func_toku = func_tok.text.upper()
                if func_toku in all_funcs_by_name:
                    (opcode,
                    min_argc,
                    max_argc,
                    func_type,
                    arg_type_str) = all_funcs_by_name[func_toku]
                    arg_type_list = list(arg_type_str)
                else:
                    raise Exception("[formula] unknown function (%s)" % func_tok.text)
                # print "**func_tok1 %s %s" % (func_toku, func_type)
                xcall = opcode < 0
                if xcall:
                    # The name of the add-in function is passed as the 1st arg
                    # of the hidden XCALL function
                    self.xcall_references.append((func_toku, len(self.rpn) + 1))
                    self.rpn += struct.pack("<BHHH",
                        ptgNameXR,
                        0xadde, # ##PATCHME## index to REF entry in EXTERNSHEET record
                        0xefbe, # ##PATCHME## one-based index to EXTERNNAME record
                        0x0000) # unused
                self.match(LP)
                arg_count=self.expr_list(arg_type_list, min_argc, max_argc)
                self.match(RP)
                if arg_count > max_argc or arg_count < min_argc:
                    raise Exception("%d parameters for function: %s" % (arg_count, func_tok.text))
                if xcall:
                    func_ptg = ptgFuncVarR + _RVAdelta[func_type]
                    self.rpn += struct.pack("<2BH", func_ptg, arg_count + 1, 255) # 255 is magic XCALL function
                elif min_argc == max_argc:
                    func_ptg = ptgFuncR + _RVAdelta[func_type]
                    self.rpn += struct.pack("<BH", func_ptg, opcode)
                elif arg_count == 1 and func_tok.text.upper() == "SUM":
                    self.rpn += struct.pack("<BBH", ptgAttr, 0x10, 0) # tAttrSum
                else:
                    func_ptg = ptgFuncVarR + _RVAdelta[func_type]
                    self.rpn += struct.pack("<2BH", func_ptg, arg_count, opcode)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
    def sheet(self):
        # Parse one sheet-name component (NAME, INT_CONST or QUOTENAME) of a
        # 3-D reference and return it as a string.
        ref = None
        sheet_ref_name = None
        sheet_ref_int = None
        sheet_ref_quote = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [NAME]:
            sheet_ref_name = self.LT(1)
            self.match(NAME)
            ref = sheet_ref_name.text
        elif la1 and la1 in [INT_CONST]:
            sheet_ref_int = self.LT(1)
            self.match(INT_CONST)
            ref = sheet_ref_int.text
        elif la1 and la1 in [QUOTENAME]:
            sheet_ref_quote = self.LT(1)
            self.match(QUOTENAME)
            ref = sheet_ref_quote.text[1:-1].replace("''", "'")
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return ref
    def expr_list(self,
        arg_type_list, min_argc, max_argc
    ):
        # Parse a comma/semicolon-separated argument list; "+" in
        # arg_type_list means "repeat the previous spec".  Missing args emit
        # ptgMissArg.  Returns the argument count for the caller to check.
        arg_cnt = None
        arg_cnt = 0
        arg_type = arg_type_list[arg_cnt]
        # print "**expr_list1[%d] req=%s" % (arg_cnt, arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
            pass
            self.expr(arg_type)
            arg_cnt += 1
            while True:
                if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
                    pass
                    if arg_cnt < len(arg_type_list):
                        arg_type = arg_type_list[arg_cnt]
                    else:
                        arg_type = arg_type_list[-1]
                    if arg_type == "+":
                        arg_type = arg_type_list[-2]
                    # print "**expr_list2[%d] req=%s" % (arg_cnt, arg_type)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP,COMMA,SEMICOLON]:
                        pass
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    arg_cnt += 1
                else:
                    break
        elif la1 and la1 in [RP]:
            # Empty argument list.
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return arg_cnt
# Maps token-type codes (the numeric constants defined at module level) to
# display names; indexed by antlr for diagnostics, so order must match the
# constant values.
_tokenNames = [
    "<0>",
    "EOF",
    "<2>",
    "NULL_TREE_LOOKAHEAD",
    "TRUE_CONST",
    "FALSE_CONST",
    "STR_CONST",
    "NUM_CONST",
    "INT_CONST",
    "FUNC_IF",
    "FUNC_CHOOSE",
    "NAME",
    "QUOTENAME",
    "EQ",
    "NE",
    "GT",
    "LT",
    "GE",
    "LE",
    "ADD",
    "SUB",
    "MUL",
    "DIV",
    "POWER",
    "PERCENT",
    "LP",
    "RP",
    "LB",
    "RB",
    "COLON",
    "COMMA",
    "SEMICOLON",
    "REF2D",
    "REF2D_R1C1",
    "BANG",
    "CONCAT"
]
### generate bit set
def mk_tokenSet_0():
    """Return the raw 64-bit chunk data for bitset 0 (token types that may
    legally follow an operand in the formula grammar)."""
    return [37681618946, 0]
# Follow-set bitset used by primary()'s two-token lookahead tests.
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
| {
"repo_name": "bopo/tablib",
"path": "tablib/packages/xlwt3/ExcelFormulaParser.py",
"copies": "46",
"size": "22812",
"license": "mit",
"hash": -3495928913831055400,
"line_mean": 32.6160849772,
"line_max": 143,
"alpha_frac": 0.4417850254,
"autogenerated": false,
"ratio": 3.6540124939932723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
### $ANTLR 2.7.7 (20160104): "lexed.g" -> "LexedLexer.py"$
### import antlr and other modules ..
import sys
import antlr
version = sys.version.split()[0]
# ANTLR 2.7 backwards-compatibility shim: define the bool constants on
# ancient Python 2.x interpreters that lacked them.
# NOTE(review): lexicographic string comparison misorders e.g. '2.10' < '2.3';
# harmless here since only pre-2.3 interpreters are targeted.
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False
### header action >>>
### header action <<<
### preamble action >>>
### preamble action <<<
### >>>The Literals<<<
# No keyword literals are defined by this grammar.
literals = {}
### import antlr.Token
from antlr import Token
### >>>The Known Token Types <<<
# Token-type codes assigned by the "lexed" grammar; they must stay in sync
# with _tokenNames in the companion LexedParser module.
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
DIGIT = 4
COLON = 5
SPACES = 6
NEWLINE = 7
NUMBER = 8
SYMBOL = 9
QUOTED_LITERAL = 10
CONTENT = 11
class Lexer(antlr.CharScanner) :
    """ANTLR-generated character-level lexer for the "lexed" token-dump
    format: lines of LINE:COL SYMBOL optionally followed by (CONTENT).
    """
    ### user action >>>
    ### user action <<<
    def __init__(self, *argv, **kwargs) :
        antlr.CharScanner.__init__(self, *argv, **kwargs)
        self.caseSensitiveLiterals = True
        self.setCaseSensitive(True)
        self.literals = literals
    def nextToken(self):
        # Dispatch on the first lookahead character to the matching m<RULE>
        # method; loops via antlr.TryAgain until a non-SKIP token is built.
        while True:
            try: ### try again ..
                while True:
                    _token = None
                    _ttype = INVALID_TYPE
                    self.resetText()
                    try: ## for char stream error handling
                        try: ##for lexical error handling
                            la1 = self.LA(1)
                            if False:
                                pass
                            elif la1 and la1 in u':':
                                pass
                                self.mCOLON(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u' ':
                                pass
                                self.mSPACES(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'\n':
                                pass
                                self.mNEWLINE(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'0123456789':
                                pass
                                self.mNUMBER(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                                pass
                                self.mSYMBOL(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'"':
                                pass
                                self.mQUOTED_LITERAL(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'(':
                                pass
                                self.mCONTENT(True)
                                theRetToken = self._returnToken
                            else:
                                self.default(self.LA(1))
                            if not self._returnToken:
                                raise antlr.TryAgain ### found SKIP token
                            ### return token to caller
                            return self._returnToken
                        ### handle lexical errors ....
                        except antlr.RecognitionException, e:
                            raise antlr.TokenStreamRecognitionException(e)
                    ### handle char stream errors ...
                    except antlr.CharStreamException,cse:
                        if isinstance(cse, antlr.CharStreamIOException):
                            raise antlr.TokenStreamIOException(cse.io)
                        else:
                            raise antlr.TokenStreamException(str(cse))
            except antlr.TryAgain:
                pass
    def mDIGIT(self, _createToken):
        # DIGIT : '0'..'9'  (helper rule used by NUMBER).
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = DIGIT
        _saveIndex = 0
        pass
        self.matchRange(u'0', u'9')
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mCOLON(self, _createToken):
        # COLON : ':'
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = COLON
        _saveIndex = 0
        pass
        self.match(':')
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mSPACES(self, _createToken):
        # SPACES : (' ')+  — at least one space required.
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = SPACES
        _saveIndex = 0
        pass
        _cnt5= 0
        while True:
            if (self.LA(1)==u' '):
                pass
                self.match(' ')
            else:
                break
            _cnt5 += 1
        if _cnt5 < 1:
            self.raise_NoViableAlt(self.LA(1))
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mNEWLINE(self, _createToken):
        # NEWLINE : '\n'  — also bumps the scanner's line counter.
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = NEWLINE
        _saveIndex = 0
        pass
        self.match('\n')
        self.newline();
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mNUMBER(self, _createToken):
        # NUMBER : (DIGIT)+
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = NUMBER
        _saveIndex = 0
        pass
        _cnt9= 0
        while True:
            if ((self.LA(1) >= u'0' and self.LA(1) <= u'9')):
                pass
                self.mDIGIT(False)
            else:
                break
            _cnt9 += 1
        if _cnt9 < 1:
            self.raise_NoViableAlt(self.LA(1))
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mSYMBOL(self, _createToken):
        # SYMBOL : ('A'..'Z')+  — upper-case token names only.
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = SYMBOL
        _saveIndex = 0
        pass
        _cnt12= 0
        while True:
            if ((self.LA(1) >= u'A' and self.LA(1) <= u'Z')):
                pass
                self.matchRange(u'A', u'Z')
            else:
                break
            _cnt12 += 1
        if _cnt12 < 1:
            self.raise_NoViableAlt(self.LA(1))
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mQUOTED_LITERAL(self, _createToken):
        # QUOTED_LITERAL : '"' ('a'..'z' | '_')+ '"'
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = QUOTED_LITERAL
        _saveIndex = 0
        pass
        self.match('"')
        _cnt15= 0
        while True:
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in u'abcdefghijklmnopqrstuvwxyz':
                pass
                self.matchRange(u'a', u'z')
            elif la1 and la1 in u'_':
                pass
                self.match('_')
            else:
                break
            _cnt15 += 1
        if _cnt15 < 1:
            self.raise_NoViableAlt(self.LA(1))
        self.match('"')
        self.set_return_token(_createToken, _token, _ttype, _begin)
    def mCONTENT(self, _createToken):
        # CONTENT : '(' (anything, non-greedy)* ')' NEWLINE
        # The parentheses and trailing newline are stripped from the token
        # text via the _saveIndex / setLength dance.
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = CONTENT
        _saveIndex = 0
        pass
        _saveIndex = self.text.length()
        self.match('(')
        self.text.setLength(_saveIndex)
        while True:
            ### nongreedy exit test
            if ((self.LA(1)==u')') and (self.LA(2)==u'\n')):
                break
            if ((self.LA(1) >= u'\u0000' and self.LA(1) <= u'\u00ff')) and ((self.LA(2) >= u'\u0000' and self.LA(2) <= u'\u00ff')):
                pass
                self.matchNot(antlr.EOF_CHAR)
            else:
                break
        _saveIndex = self.text.length()
        self.match(')')
        self.text.setLength(_saveIndex)
        _saveIndex = self.text.length()
        self.mNEWLINE(False)
        self.text.setLength(_saveIndex)
        self.set_return_token(_createToken, _token, _ttype, _begin)
### __main__ header action >>>
if __name__ == '__main__' :
    import sys
    import antlr
    import LexedLexer
    ### create lexer - shall read from stdin
    # Smoke test: lex stdin and dump each token (Python 2 print statements).
    try:
        for token in LexedLexer.Lexer():
            print token
    except antlr.TokenStreamException, e:
        print "error: exception caught while lexing: ", e
### __main__ header action <<<
| {
"repo_name": "andybalaam/pepper",
"path": "old/pepper1/src/parse/LexedLexer.py",
"copies": "1",
"size": "8649",
"license": "mit",
"hash": 869102682061540100,
"line_mean": 31.1524163569,
"line_max": 131,
"alpha_frac": 0.4403977338,
"autogenerated": false,
"ratio": 4.3201798201798205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.526057755397982,
"avg_score": null,
"num_lines": null
} |
### $ANTLR 2.7.7 (20160104): "lexed.g" -> "LexedParser.py"$
### import antlr and other modules ..
import sys
import antlr
version = sys.version.split()[0]
# ANTLR 2.7 backwards-compatibility shim: define the bool constants on
# ancient Python 2.x interpreters that lacked them.
# NOTE(review): lexicographic string comparison misorders e.g. '2.10' < '2.3';
# harmless here since only pre-2.3 interpreters are targeted.
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False
### header action >>>
### header action <<<
### preamble action>>>
### preamble action <<<
### import antlr.Token
from antlr import Token
### >>>The Known Token Types <<<
# Token-type codes for the "lexed" grammar; they must stay in sync with
# _tokenNames below and with the companion LexedLexer module.
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
DIGIT = 4
COLON = 5
SPACES = 6
NEWLINE = 7
NUMBER = 8
SYMBOL = 9
QUOTED_LITERAL = 10
CONTENT = 11
class Parser(antlr.LLkParser):
    """ANTLR 2.7.7-generated LL(k) parser for the "lexed" token format.

    Each parsed line has the shape
        NUMBER ':' NUMBER SPACES (SYMBOL | QUOTED_LITERAL) (CONTENT | NEWLINE)
    and is converted back into an antlr CommonToken whose type is looked up
    in PepperParser's token-name table.
    """
    ### user action >>>
    ### user action <<<

    def __init__(self, *args, **kwargs):
        antlr.LLkParser.__init__(self, *args, **kwargs)
        self.tokenNames = _tokenNames

    def tokenName(self):
        """Match a SYMBOL or QUOTED_LITERAL token and return it."""
        t = None
        s = None
        l = None
        pass
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [SYMBOL]:
            pass
            s = self.LT(1)
            self.match(SYMBOL)
            t=s
        elif la1 and la1 in [QUOTED_LITERAL]:
            pass
            l = self.LT(1)
            self.match(QUOTED_LITERAL)
            t=l
        else:
            # Lookahead matched neither alternative.
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return t

    def line(self):
        """Parse one line and rebuild it as a PepperParser CommonToken.

        Returns the reconstructed token, or None when the line does not
        match the grammar (the RecognitionException is swallowed).
        """
        t = None
        linenum = None
        colnum = None
        content = None
        try: ## for error handling
            pass
            # line := NUMBER ':' NUMBER SPACES tokenName (CONTENT | NEWLINE)
            linenum = self.LT(1)
            self.match(NUMBER)
            self.match(COLON)
            colnum = self.LT(1)
            self.match(NUMBER)
            self.match(SPACES)
            symbol=self.tokenName()
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in [CONTENT]:
                pass
                content = self.LT(1)
                self.match(CONTENT)
            elif la1 and la1 in [NEWLINE]:
                pass
                self.match(NEWLINE)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
            # Map the symbol's text back to PepperParser's numeric token type.
            from antlr import CommonToken
            import PepperParser
            t = CommonToken(
                type = PepperParser._tokenNames.index( symbol.getText() ) )
            if content is not None:
                t.setText( content.getText() )
            # Restore the original source position from the parsed numbers.
            t.setLine( int( linenum.getText() ) )
            t.setColumn( int( colnum.getText() ) )
        except antlr.RecognitionException, ex:
            # Malformed line: report nothing to the caller.
            return None
        return t
# Token-type names indexed by the numeric constants above
# (e.g. _tokenNames[DIGIT] == "DIGIT"); slots 0-3 are ANTLR reserved types.
_tokenNames = [
    "<0>",
    "EOF",
    "<2>",
    "NULL_TREE_LOOKAHEAD",
    "DIGIT",
    "COLON",
    "SPACES",
    "NEWLINE",
    "NUMBER",
    "SYMBOL",
    "QUOTED_LITERAL",
    "CONTENT"
]
| {
"repo_name": "andybalaam/pepper",
"path": "old/pepper1/src/parse/LexedParser.py",
"copies": "1",
"size": "3046",
"license": "mit",
"hash": -8377346580719194000,
"line_mean": 23.368,
"line_max": 84,
"alpha_frac": 0.4878529219,
"autogenerated": false,
"ratio": 3.798004987531172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4785857909431172,
"avg_score": null,
"num_lines": null
} |
End of preview. Expand in Dataset Viewer.
Based on tomekkorbak/codeparrot-clean-train-v2-pep8 and codeparrot/codeparrot-clean, restricted to open-source, non-GPL code. We create a combined quality_score as described below.
Score is computed as follows:
min(1.0, int(dat['copies'])/20 + dat['ratio']/10 + dat['alpha_frac']* 0.1 + 0.5*(1-dat['avg_score'])) if avg_score exists
min(1.0, int(dat['copies'])/20 + dat['ratio']/10 + dat['alpha_frac']* 0.1) otherwise
avg_score is the pep8 lint score (lower is better).
- Downloads last month
- 35