| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
| python | PyCQA__pycodestyle | tests/test_parser.py | {"start": 295, "end": 1250} |
class ____(unittest.TestCase):

    def test_vanilla_ignore_parsing(self):
        contents = b"""
[pycodestyle]
ignore = E226,E24
"""
        options, args = _process_file(contents)
        self.assertEqual(options.ignore, ["E226", "E24"])

    def test_multiline_ignore_parsing(self):
        contents = b"""
[pycodestyle]
ignore =
    E226,
    E24
"""
        options, args = _process_file(contents)
        self.assertEqual(options.ignore, ["E226", "E24"])

    def test_trailing_comma_ignore_parsing(self):
        contents = b"""
[pycodestyle]
ignore = E226,
"""
        options, args = _process_file(contents)
        self.assertEqual(options.ignore, ["E226"])

    def test_multiline_trailing_comma_ignore_parsing(self):
        contents = b"""
[pycodestyle]
ignore =
    E226,
    E24,
"""
        options, args = _process_file(contents)
        self.assertEqual(options.ignore, ["E226", "E24"])
| ParserTestCase |
| python | huggingface__transformers | src/transformers/models/janus/modeling_janus.py | {"start": 3292, "end": 4956} |
class ____(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the model.

        If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
        hidden_size)` is output.
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally, if
        `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
        input) to speed up sequential decoding.
    image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
        sequence_length, hidden_size)`.

        image_hidden_states of the model, produced by the vision encoder and optionally by the perceiver.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Janus causal language model (or autoregressive) outputs.
    """
)
| JanusBaseModelOutputWithPast |
| python | pymupdf__PyMuPDF | src_classic/utils.py | {"start": 95635, "end": 181938} |
class ____(object):
"""Create a new shape."""
@staticmethod
def horizontal_angle(C, P):
"""Return the angle to the horizontal for the connection from C to P.
This uses the arcsine function and resolves its inherent ambiguity by
checking in which quadrant the vector S = P - C is located.
"""
S = Point(P - C).unit # unit vector 'C' -> 'P'
alfa = math.asin(abs(S.y)) # absolute angle from horizontal
if S.x < 0: # make arcsin result unique
if S.y <= 0: # bottom-left
alfa = -(math.pi - alfa)
else: # top-left
alfa = math.pi - alfa
else:
if S.y >= 0: # top-right
pass
else: # bottom-right
alfa = -alfa
return alfa
def __init__(self, page: Page):
CheckParent(page)
self.page = page
self.doc = page.parent
if not self.doc.is_pdf:
raise ValueError("is no PDF")
self.height = page.mediabox_size.y
self.width = page.mediabox_size.x
self.x = page.cropbox_position.x
self.y = page.cropbox_position.y
self.pctm = page.transformation_matrix # page transf. matrix
self.ipctm = ~self.pctm # inverted transf. matrix
self.draw_cont = ""
self.text_cont = ""
self.totalcont = ""
self.lastPoint = None
self.rect = None
def updateRect(self, x):
if self.rect is None:
if len(x) == 2:
self.rect = Rect(x, x)
else:
self.rect = Rect(x)
else:
if len(x) == 2:
x = Point(x)
self.rect.x0 = min(self.rect.x0, x.x)
self.rect.y0 = min(self.rect.y0, x.y)
self.rect.x1 = max(self.rect.x1, x.x)
self.rect.y1 = max(self.rect.y1, x.y)
else:
x = Rect(x)
self.rect.x0 = min(self.rect.x0, x.x0)
self.rect.y0 = min(self.rect.y0, x.y0)
self.rect.x1 = max(self.rect.x1, x.x1)
self.rect.y1 = max(self.rect.y1, x.y1)
def draw_line(self, p1: point_like, p2: point_like) -> Point:
"""Draw a line between two points."""
p1 = Point(p1)
p2 = Point(p2)
if not (self.lastPoint == p1):
self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
self.lastPoint = p1
self.updateRect(p1)
self.draw_cont += "%g %g l\n" % JM_TUPLE(p2 * self.ipctm)
self.updateRect(p2)
self.lastPoint = p2
return self.lastPoint
def draw_polyline(self, points: list) -> Point:
"""Draw several connected line segments."""
for i, p in enumerate(points):
if i == 0:
if not (self.lastPoint == Point(p)):
self.draw_cont += "%g %g m\n" % JM_TUPLE(Point(p) * self.ipctm)
self.lastPoint = Point(p)
else:
self.draw_cont += "%g %g l\n" % JM_TUPLE(Point(p) * self.ipctm)
self.updateRect(p)
self.lastPoint = Point(points[-1])
return self.lastPoint
def draw_bezier(
self,
p1: point_like,
p2: point_like,
p3: point_like,
p4: point_like,
) -> Point:
"""Draw a standard cubic Bezier curve."""
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
p4 = Point(p4)
if not (self.lastPoint == p1):
self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
self.draw_cont += "%g %g %g %g %g %g c\n" % JM_TUPLE(
list(p2 * self.ipctm) + list(p3 * self.ipctm) + list(p4 * self.ipctm)
)
self.updateRect(p1)
self.updateRect(p2)
self.updateRect(p3)
self.updateRect(p4)
self.lastPoint = p4
return self.lastPoint
def draw_oval(self, tetra: typing.Union[quad_like, rect_like]) -> Point:
"""Draw an ellipse inside a tetrapod."""
if len(tetra) != 4:
raise ValueError("invalid arg length")
if hasattr(tetra[0], "__float__"):
q = Rect(tetra).quad
else:
q = Quad(tetra)
mt = q.ul + (q.ur - q.ul) * 0.5
mr = q.ur + (q.lr - q.ur) * 0.5
mb = q.ll + (q.lr - q.ll) * 0.5
ml = q.ul + (q.ll - q.ul) * 0.5
if not (self.lastPoint == ml):
self.draw_cont += "%g %g m\n" % JM_TUPLE(ml * self.ipctm)
self.lastPoint = ml
self.draw_curve(ml, q.ll, mb)
self.draw_curve(mb, q.lr, mr)
self.draw_curve(mr, q.ur, mt)
self.draw_curve(mt, q.ul, ml)
self.updateRect(q.rect)
self.lastPoint = ml
return self.lastPoint
def draw_circle(self, center: point_like, radius: float) -> Point:
"""Draw a circle given its center and radius."""
if not radius > EPSILON:
raise ValueError("radius must be positive")
center = Point(center)
p1 = center - (radius, 0)
return self.draw_sector(center, p1, 360, fullSector=False)
def draw_curve(
self,
p1: point_like,
p2: point_like,
p3: point_like,
) -> Point:
"""Draw a curve between points using one control point."""
kappa = 0.55228474983
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
k1 = p1 + (p2 - p1) * kappa
k2 = p3 + (p2 - p3) * kappa
return self.draw_bezier(p1, k1, k2, p3)
def draw_sector(
self,
center: point_like,
point: point_like,
beta: float,
fullSector: bool = True,
) -> Point:
"""Draw a circle sector."""
center = Point(center)
point = Point(point)
l3 = "%g %g m\n"
l4 = "%g %g %g %g %g %g c\n"
l5 = "%g %g l\n"
betar = math.radians(-beta)
w360 = math.radians(math.copysign(360, betar)) * (-1)
w90 = math.radians(math.copysign(90, betar))
w45 = w90 / 2
while abs(betar) > 2 * math.pi:
betar += w360 # bring angle below 360 degrees
if not (self.lastPoint == point):
self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
self.lastPoint = point
Q = Point(0, 0) # just make sure it exists
C = center
P = point
S = P - C # vector 'center' -> 'point'
rad = abs(S) # circle radius
if not rad > EPSILON:
raise ValueError("radius must be positive")
alfa = self.horizontal_angle(center, point)
while abs(betar) > abs(w90): # draw 90 degree arcs
q1 = C.x + math.cos(alfa + w90) * rad
q2 = C.y + math.sin(alfa + w90) * rad
Q = Point(q1, q2) # the arc's end point
r1 = C.x + math.cos(alfa + w45) * rad / math.cos(w45)
r2 = C.y + math.sin(alfa + w45) * rad / math.cos(w45)
R = Point(r1, r2) # crossing point of tangents
kappah = (1 - math.cos(w45)) * 4 / 3 / abs(R - Q)
kappa = kappah * abs(P - Q)
cp1 = P + (R - P) * kappa # control point 1
cp2 = Q + (R - Q) * kappa # control point 2
self.draw_cont += l4 % JM_TUPLE(
list(cp1 * self.ipctm) + list(cp2 * self.ipctm) + list(Q * self.ipctm)
)
betar -= w90 # reduce parm angle by 90 deg
alfa += w90 # advance start angle by 90 deg
P = Q # advance to arc end point
# draw (remaining) arc
if abs(betar) > 1e-3: # significant degrees left?
beta2 = betar / 2
q1 = C.x + math.cos(alfa + betar) * rad
q2 = C.y + math.sin(alfa + betar) * rad
Q = Point(q1, q2) # the arc's end point
r1 = C.x + math.cos(alfa + beta2) * rad / math.cos(beta2)
r2 = C.y + math.sin(alfa + beta2) * rad / math.cos(beta2)
R = Point(r1, r2) # crossing point of tangents
# kappa height is 4/3 of segment height
kappah = (1 - math.cos(beta2)) * 4 / 3 / abs(R - Q) # kappa height
kappa = kappah * abs(P - Q) / (1 - math.cos(betar))
cp1 = P + (R - P) * kappa # control point 1
cp2 = Q + (R - Q) * kappa # control point 2
self.draw_cont += l4 % JM_TUPLE(
list(cp1 * self.ipctm) + list(cp2 * self.ipctm) + list(Q * self.ipctm)
)
if fullSector:
self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
self.draw_cont += l5 % JM_TUPLE(center * self.ipctm)
self.draw_cont += l5 % JM_TUPLE(Q * self.ipctm)
self.lastPoint = Q
return self.lastPoint
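# A minimal usage sketch for draw_sector, assuming the classic "fitz"
# import; point values and the file name are illustrative only:
#
#   import fitz
#   doc = fitz.open()  # new, empty PDF
#   page = doc.new_page()
#   shape = page.new_shape()
#   center, point = fitz.Point(200, 200), fitz.Point(300, 200)
#   shape.draw_sector(center, point, 110, fullSector=True)  # 110 deg pie slice
#   shape.finish(color=(0, 0, 1), fill=(1, 1, 0))
#   shape.commit()
#   doc.save("sector-example.pdf")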
def draw_rect(self, rect: rect_like, *, radius=None) -> Point:
"""Draw a rectangle.
Args:
radius: if not None, the rectangle will have rounded corners.
This is the radius of the curvature, given as percentage of
the rectangle width or height. Valid are values 0 < v <= 0.5.
For a sequence of two values, the corners will have different
radii. Otherwise, the percentage will be computed from the
shorter side. A value of (0.5, 0.5) will draw an ellipse.
"""
r = Rect(rect)
if radius == None: # standard rectangle
self.draw_cont += "%g %g %g %g re\n" % JM_TUPLE(
list(r.bl * self.ipctm) + [r.width, r.height]
)
self.updateRect(r)
self.lastPoint = r.tl
return self.lastPoint
# rounded corners requested. This requires 1 or 2 values, each
# with 0 < value <= 0.5
if hasattr(radius, "__float__"):
if radius <= 0 or radius > 0.5:
raise ValueError(f"bad radius value {radius}.")
d = min(r.width, r.height) * radius
px = (d, 0)
py = (0, d)
elif hasattr(radius, "__len__") and len(radius) == 2:
rx, ry = radius
px = (rx * r.width, 0)
py = (0, ry * r.height)
if min(rx, ry) <= 0 or max(rx, ry) > 0.5:
raise ValueError(f"bad radius value {radius}.")
else:
raise ValueError(f"bad radius value {radius}.")
lp = self.draw_line(r.tl + py, r.bl - py)
lp = self.draw_curve(lp, r.bl, r.bl + px)
lp = self.draw_line(lp, r.br - px)
lp = self.draw_curve(lp, r.br, r.br - py)
lp = self.draw_line(lp, r.tr + py)
lp = self.draw_curve(lp, r.tr, r.tr - px)
lp = self.draw_line(lp, r.tl + px)
self.lastPoint = self.draw_curve(lp, r.tl, r.tl + py)
self.updateRect(r)
return self.lastPoint
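# A minimal sketch of the rounded-corner variant, assuming an existing
# "page" object; per the docstring, radius is a fraction (0 < v <= 0.5)
# of the shorter side, or a pair of fractions of width and height:
#
#   shape = page.new_shape()
#   shape.draw_rect(fitz.Rect(100, 100, 300, 200), radius=0.2)
#   shape.draw_rect(fitz.Rect(100, 250, 300, 350), radius=(0.5, 0.5))  # ellipse
#   shape.finish(color=(1, 0, 0))
#   shape.commit()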
def draw_quad(self, quad: quad_like) -> Point:
"""Draw a Quad."""
q = Quad(quad)
return self.draw_polyline([q.ul, q.ll, q.lr, q.ur, q.ul])
def draw_zigzag(
self,
p1: point_like,
p2: point_like,
breadth: float = 2,
) -> Point:
"""Draw a zig-zagged line from p1 to p2."""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = Matrix(util_hor_matrix(p1, p2)) # normalize line to x-axis
i_mat = ~matrix # get original position
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -1) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, 1) * mb
else: # ignore others
continue
points.append(p * i_mat)
self.draw_polyline([p1] + points + [p2]) # add start and end points
return p2
def draw_squiggle(
self,
p1: point_like,
p2: point_like,
breadth=2,
) -> Point:
"""Draw a squiggly line from p1 to p2."""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = Matrix(util_hor_matrix(p1, p2)) # normalize line to x-axis
i_mat = ~matrix # get original position
k = 2.4142135623765633 # y of draw_curve helper point
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -k) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, k) * mb
else: # else on connection line
p = Point(i, 0) * mb
points.append(p * i_mat)
points = [p1] + points + [p2]
cnt = len(points)
i = 0
while i + 2 < cnt:
self.draw_curve(points[i], points[i + 1], points[i + 2])
i += 2
return p2
# ==============================================================================
# Shape.insert_text
# ==============================================================================
def insert_text(
self,
point: point_like,
buffer: typing.Union[str, list],
fontsize: float = 11,
lineheight: OptFloat = None,
fontname: str = "helv",
fontfile: OptStr = None,
set_simple: bool = 0,
encoding: int = 0,
color: OptSeq = None,
fill: OptSeq = None,
render_mode: int = 0,
border_width: float = 0.05,
rotate: int = 0,
morph: OptSeq = None,
stroke_opacity: float = 1,
fill_opacity: float = 1,
oc: int = 0,
) -> int:
# ensure 'text' is a list of strings, worth dealing with
if not bool(buffer):
return 0
if type(buffer) not in (list, tuple):
text = buffer.splitlines()
else:
text = buffer
if not len(text) > 0:
return 0
point = Point(point)
try:
maxcode = max([ord(c) for c in " ".join(text)])
except:
return 0
# ensure valid 'fontname'
fname = fontname
if fname.startswith("/"):
fname = fname[1:]
xref = self.page.insert_font(
fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple
)
fontinfo = CheckFontInfo(self.doc, xref)
fontdict = fontinfo[1]
ordering = fontdict["ordering"]
simple = fontdict["simple"]
bfname = fontdict["name"]
ascender = fontdict["ascender"]
descender = fontdict["descender"]
if lineheight:
lheight = fontsize * lineheight
elif ascender - descender <= 1:
lheight = fontsize * 1.2
else:
lheight = fontsize * (ascender - descender)
if maxcode > 255:
glyphs = self.doc.get_char_widths(xref, maxcode + 1)
else:
glyphs = fontdict["glyphs"]
tab = []
for t in text:
if simple and bfname not in ("Symbol", "ZapfDingbats"):
g = None
else:
g = glyphs
tab.append(getTJstr(t, g, simple, ordering))
text = tab
color_str = ColorCode(color, "c")
fill_str = ColorCode(fill, "f")
if not fill and render_mode == 0: # ensure fill color when 0 Tr
fill = color
fill_str = ColorCode(color, "f")
morphing = CheckMorph(morph)
rot = rotate
if rot % 90 != 0:
raise ValueError("bad rotate value")
while rot < 0:
rot += 360
rot = rot % 360 # text rotate = 0, 90, 270, 180
templ1 = "\nq\n%s%sBT\n%s1 0 0 1 %g %g Tm\n/%s %g Tf "
templ2 = "TJ\n0 -%g TD\n"
cmp90 = "0 1 -1 0 0 0 cm\n" # rotates 90 deg counter-clockwise
cmm90 = "0 -1 1 0 0 0 cm\n" # rotates 90 deg clockwise
cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg.
height = self.height
width = self.width
# setting up for standard rotation directions
# case rotate = 0
if morphing:
m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, height - morph[0].y - self.y)
mat = ~m1 * morph[1] * m1
cm = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat)
else:
cm = ""
top = height - point.y - self.y # start of first char
left = point.x + self.x # start of first char
space = top # space available
headroom = point.y + self.y # distance to page border
if rot == 90:
left = height - point.y - self.y
top = -point.x - self.x
cm += cmp90
space = width - abs(top)
headroom = point.x + self.x
elif rot == 270:
left = -height + point.y + self.y
top = point.x + self.x
cm += cmm90
space = abs(top)
headroom = width - point.x - self.x
elif rot == 180:
left = -point.x - self.x
top = -height + point.y + self.y
cm += cm180
space = abs(point.y + self.y)
headroom = height - point.y - self.y
optcont = self.page._get_optional_content(oc)
if optcont != None:
bdc = "/OC /%s BDC\n" % optcont
emc = "EMC\n"
else:
bdc = emc = ""
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha == None:
alpha = ""
else:
alpha = "/%s gs\n" % alpha
nres = templ1 % (bdc, alpha, cm, left, top, fname, fontsize)
if render_mode > 0:
nres += "%i Tr " % render_mode
nres += "%g w " % (border_width * fontsize)
if color is not None:
nres += color_str
if fill is not None:
nres += fill_str
# =========================================================================
# start text insertion
# =========================================================================
nres += text[0]
nlines = 1 # set output line counter
if len(text) > 1:
nres += templ2 % lheight # line 1
else:
nres += templ2[:2]
for i in range(1, len(text)):
if space < lheight:
break # no space left on page
if i > 1:
nres += "\nT* "
nres += text[i] + templ2[:2]
space -= lheight
nlines += 1
nres += "\nET\n%sQ\n" % emc
# =====================================================================
# end of text insertion
# =====================================================================
# update the /Contents object
self.text_cont += nres
return nlines
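# A minimal usage sketch, assuming an existing "page": each string in the
# buffer becomes one output line, and the return value is the number of
# lines actually written:
#
#   shape = page.new_shape()
#   n = shape.insert_text(fitz.Point(50, 72), "line 1\nline 2",
#                         fontsize=11, color=(0, 0, 1))
#   shape.commit()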
# =========================================================================
# Shape.insert_textbox
# =========================================================================
def insert_textbox(
self,
rect: rect_like,
buffer: typing.Union[str, list],
fontname: OptStr = "helv",
fontfile: OptStr = None,
fontsize: float = 11,
lineheight: OptFloat = None,
set_simple: bool = 0,
encoding: int = 0,
color: OptSeq = None,
fill: OptSeq = None,
expandtabs: int = 1,
border_width: float = 0.05,
align: int = 0,
render_mode: int = 0,
rotate: int = 0,
morph: OptSeq = None,
stroke_opacity: float = 1,
fill_opacity: float = 1,
oc: int = 0,
) -> float:
"""Insert text into a given rectangle.
Args:
rect -- the textbox to fill
buffer -- text to be inserted
fontname -- a Base-14 font, font name or '/name'
fontfile -- name of a font file
fontsize -- font size
lineheight -- overwrite the font property
color -- RGB stroke color triple
fill -- RGB fill color triple
render_mode -- text rendering control
border_width -- thickness of glyph borders as percentage of fontsize
expandtabs -- handles tabulators with string function
align -- left, center, right, justified
rotate -- 0, 90, 180, or 270 degrees
morph -- morph box with a matrix and a fixpoint
Returns:
unused or deficit rectangle area (float)
"""
rect = Rect(rect)
if rect.is_empty or rect.is_infinite:
raise ValueError("text box must be finite and not empty")
color_str = ColorCode(color, "c")
fill_str = ColorCode(fill, "f")
if fill is None and render_mode == 0: # ensure fill color for 0 Tr
fill = color
fill_str = ColorCode(color, "f")
optcont = self.page._get_optional_content(oc)
if optcont != None:
bdc = "/OC /%s BDC\n" % optcont
emc = "EMC\n"
else:
bdc = emc = ""
# determine opacity / transparency
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha == None:
alpha = ""
else:
alpha = "/%s gs\n" % alpha
if rotate % 90 != 0:
raise ValueError("rotate must be multiple of 90")
rot = rotate
while rot < 0:
rot += 360
rot = rot % 360
# is buffer worth of dealing with?
if not bool(buffer):
return rect.height if rot in (0, 180) else rect.width
cmp90 = "0 1 -1 0 0 0 cm\n" # rotates counter-clockwise
cmm90 = "0 -1 1 0 0 0 cm\n" # rotates clockwise
cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg.
height = self.height
fname = fontname
if fname.startswith("/"):
fname = fname[1:]
xref = self.page.insert_font(
fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple
)
fontinfo = CheckFontInfo(self.doc, xref)
fontdict = fontinfo[1]
ordering = fontdict["ordering"]
simple = fontdict["simple"]
glyphs = fontdict["glyphs"]
bfname = fontdict["name"]
ascender = fontdict["ascender"]
descender = fontdict["descender"]
if lineheight:
lheight_factor = lineheight
elif ascender - descender <= 1:
lheight_factor = 1.2
else:
lheight_factor = ascender - descender
lheight = fontsize * lheight_factor
# create a list from buffer, split into its lines
if type(buffer) in (list, tuple):
t0 = "\n".join(buffer)
else:
t0 = buffer
maxcode = max([ord(c) for c in t0])
# replace invalid char codes for simple fonts
if simple and maxcode > 255:
t0 = "".join([c if ord(c) < 256 else "?" for c in t0])
t0 = t0.splitlines()
glyphs = self.doc.get_char_widths(xref, maxcode + 1)
if simple and bfname not in ("Symbol", "ZapfDingbats"):
tj_glyphs = None
else:
tj_glyphs = glyphs
# ----------------------------------------------------------------------
# calculate pixel length of a string
# ----------------------------------------------------------------------
def pixlen(x):
"""Calculate pixel length of x."""
if ordering < 0:
return sum([glyphs[ord(c)][1] for c in x]) * fontsize
else:
return len(x) * fontsize
# ---------------------------------------------------------------------
if ordering < 0:
blen = glyphs[32][1] * fontsize # pixel size of space character
else:
blen = fontsize
text = "" # output buffer
if CheckMorph(morph):
m1 = Matrix(
1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y
)
mat = ~m1 * morph[1] * m1
cm = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat)
else:
cm = ""
# ---------------------------------------------------------------------
# adjust for text orientation / rotation
# ---------------------------------------------------------------------
progr = 1 # direction of line progress
c_pnt = Point(0, fontsize * ascender) # used for line progress
if rot == 0: # normal orientation
point = rect.tl + c_pnt # line 1 is 'lheight' below top
maxwidth = rect.width # pixels available in one line
maxheight = rect.height # available text height
elif rot == 90: # rotate counter clockwise
c_pnt = Point(fontsize * ascender, 0) # progress in x-direction
point = rect.bl + c_pnt # line 1 'lheight' away from left
maxwidth = rect.height # pixels available in one line
maxheight = rect.width # available text height
cm += cmp90
elif rot == 180: # text upside down
# progress upwards in y direction
c_pnt = -Point(0, fontsize * ascender)
point = rect.br + c_pnt # line 1 'lheight' above bottom
maxwidth = rect.width # pixels available in one line
progr = -1 # subtract lheight for next line
maxheight = rect.height # available text height
cm += cm180
else: # rotate clockwise (270 or -90)
# progress from right to left
c_pnt = -Point(fontsize * ascender, 0)
point = rect.tr + c_pnt # line 1 'lheight' left of right
maxwidth = rect.height # pixels available in one line
progr = -1 # subtract lheight for next line
maxheight = rect.width # available text height
cm += cmm90
# =====================================================================
# line loop
# =====================================================================
just_tab = [] # 'justify' indicators per line
for i, line in enumerate(t0):
line_t = line.expandtabs(expandtabs).split(" ") # split into words
num_words = len(line_t)
lbuff = "" # init line buffer
rest = maxwidth # available line pixels
# =================================================================
# word loop
# =================================================================
for j in range(num_words):
word = line_t[j]
pl_w = pixlen(word) # pixel len of word
if rest >= pl_w: # does it fit on the line?
lbuff += word + " " # yes, append word
rest -= pl_w + blen # update available line space
continue # next word
# word doesn't fit - output line (if not empty)
if lbuff:
lbuff = lbuff.rstrip() + "\n" # line full, append line break
text += lbuff # append to total text
just_tab.append(True) # can align-justify
lbuff = "" # re-init line buffer
rest = maxwidth # re-init avail. space
if pl_w <= maxwidth: # word shorter than 1 line?
lbuff = word + " " # start the line with it
rest = maxwidth - pl_w - blen # update free space
continue
# long word: split across multiple lines - char by char ...
if len(just_tab) > 0:
just_tab[-1] = False # cannot align-justify
for c in word:
if pixlen(lbuff) <= maxwidth - pixlen(c):
lbuff += c
else: # line full
lbuff += "\n" # close line
text += lbuff # append to text
just_tab.append(False) # cannot align-justify
lbuff = c # start new line with this char
lbuff += " " # finish long word
rest = maxwidth - pixlen(lbuff) # long word stored
if lbuff: # unprocessed line content?
text += lbuff.rstrip() # append to text
just_tab.append(False) # cannot align-justify
if i < len(t0) - 1: # not the last line?
text += "\n" # insert line break
# compute used part of the textbox
if text.endswith("\n"):
text = text[:-1]
lb_count = text.count("\n") + 1 # number of lines written
# text height = line count * line height plus one descender value
text_height = lheight * lb_count - descender * fontsize
more = text_height - maxheight # difference to height limit
if more > EPSILON: # landed too much outside rect
return (-1) * more # return deficit, don't output
more = abs(more)
if more < EPSILON:
more = 0 # don't bother with epsilons
nres = "\nq\n%s%sBT\n" % (bdc, alpha) + cm # initialize output buffer
templ = "1 0 0 1 %g %g Tm /%s %g Tf "
# center, right, justify: output each line with its own specifics
text_t = text.splitlines() # split text in lines again
just_tab[-1] = False # never justify last line
for i, t in enumerate(text_t):
pl = maxwidth - pixlen(t) # length of empty line part
pnt = point + c_pnt * (i * lheight_factor) # text start of line
if align == 1: # center: right shift by half width
if rot in (0, 180):
pnt = pnt + Point(pl / 2, 0) * progr
else:
pnt = pnt - Point(0, pl / 2) * progr
elif align == 2: # right: right shift by full width
if rot in (0, 180):
pnt = pnt + Point(pl, 0) * progr
else:
pnt = pnt - Point(0, pl) * progr
elif align == 3: # justify
spaces = t.count(" ") # number of spaces in line
if spaces > 0 and just_tab[i]: # if any, and we may justify
spacing = pl / spaces # make every space this much larger
else:
spacing = 0 # keep normal space length
top = height - pnt.y - self.y
left = pnt.x + self.x
if rot == 90:
left = height - pnt.y - self.y
top = -pnt.x - self.x
elif rot == 270:
left = -height + pnt.y + self.y
top = pnt.x + self.x
elif rot == 180:
left = -pnt.x - self.x
top = -height + pnt.y + self.y
nres += templ % (left, top, fname, fontsize)
if render_mode > 0:
nres += "%i Tr " % render_mode
nres += "%g w " % (border_width * fontsize)
if align == 3:
nres += "%g Tw " % spacing
if color is not None:
nres += color_str
if fill is not None:
nres += fill_str
nres += "%sTJ\n" % getTJstr(t, tj_glyphs, simple, ordering)
nres += "ET\n%sQ\n" % emc
self.text_cont += nres
self.updateRect(rect)
return more
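# A minimal usage sketch, assuming an existing "page": a negative return
# value is the vertical deficit, i.e. the text did not fit and nothing
# was written; a non-negative value is the unused rectangle height:
#
#   shape = page.new_shape()
#   rect = fitz.Rect(100, 100, 300, 200)
#   rc = shape.insert_textbox(rect, "some longish text ...",
#                             fontsize=11, align=3)  # 3 = justified
#   if rc < 0:
#       print("text is short by %g points" % -rc)
#   shape.commit()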
def finish(
self,
width: float = 1,
color: OptSeq = (0,),
fill: OptSeq = None,
lineCap: int = 0,
lineJoin: int = 0,
dashes: OptStr = None,
even_odd: bool = False,
morph: OptSeq = None,
closePath: bool = True,
fill_opacity: float = 1,
stroke_opacity: float = 1,
oc: int = 0,
) -> None:
"""Finish the current drawing segment.
Notes:
Apply colors, opacity, dashes, line style and width, and optional
morphing. Also controls whether to close the path by connecting
the last to the first point.
"""
if self.draw_cont == "": # treat empty contents as no-op
return
if width == 0: # border color makes no sense then
color = None
elif color == None: # vice versa
width = 0
# if color == None and fill == None:
# raise ValueError("at least one of 'color' or 'fill' must be given")
color_str = ColorCode(color, "c") # ensure proper color string
fill_str = ColorCode(fill, "f") # ensure proper fill string
optcont = self.page._get_optional_content(oc)
if optcont is not None:
self.draw_cont = "/OC /%s BDC\n" % optcont + self.draw_cont
emc = "EMC\n"
else:
emc = ""
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha != None:
self.draw_cont = "/%s gs\n" % alpha + self.draw_cont
if width != 1 and width != 0:
self.draw_cont += "%g w\n" % width
if lineCap != 0:
self.draw_cont = "%i J\n" % lineCap + self.draw_cont
if lineJoin != 0:
self.draw_cont = "%i j\n" % lineJoin + self.draw_cont
if dashes not in (None, "", "[] 0"):
self.draw_cont = "%s d\n" % dashes + self.draw_cont
if closePath:
self.draw_cont += "h\n"
self.lastPoint = None
if color is not None:
self.draw_cont += color_str
if fill is not None:
self.draw_cont += fill_str
if color is not None:
if not even_odd:
self.draw_cont += "B\n"
else:
self.draw_cont += "B*\n"
else:
if not even_odd:
self.draw_cont += "f\n"
else:
self.draw_cont += "f*\n"
else:
self.draw_cont += "S\n"
self.draw_cont += emc
if CheckMorph(morph):
m1 = Matrix(
1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y
)
mat = ~m1 * morph[1] * m1
self.draw_cont = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat) + self.draw_cont
self.totalcont += "\nq\n" + self.draw_cont + "Q\n"
self.draw_cont = ""
self.lastPoint = None
return
def commit(self, overlay: bool = True) -> None:
"""Update the page's /Contents object with Shape data. The argument controls whether data appear in foreground (default) or background."""
CheckParent(self.page) # doc may have died meanwhile
self.totalcont += self.text_cont
self.totalcont = self.totalcont.encode()
if self.totalcont != b"":
# make /Contents object with dummy stream
xref = TOOLS._insert_contents(self.page, b" ", overlay)
# update it with potential compression
self.doc.update_stream(xref, self.totalcont)
self.lastPoint = None # clean up ...
self.rect = None #
self.draw_cont = "" # for potential ...
self.text_cont = "" # ...
self.totalcont = "" # re-use
return
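# A minimal end-to-end sketch of the intended Shape life cycle
# (draw / text methods -> finish -> commit), assuming the classic
# "fitz" import; the file name is illustrative:
#
#   import fitz
#   doc = fitz.open()
#   page = doc.new_page()
#   shape = page.new_shape()
#   shape.draw_line(fitz.Point(50, 50), fitz.Point(200, 50))
#   shape.draw_circle(fitz.Point(125, 120), 40)
#   shape.finish(color=(0, 0, 0), fill=(0.9, 0.9, 0.9), width=1.5)
#   shape.insert_text(fitz.Point(50, 200), "done")
#   shape.commit(overlay=True)  # write as foreground /Contents
#   doc.save("shape-example.pdf")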
def apply_redactions(page: Page, images: int = 2) -> bool:
"""Apply the redaction annotations of the page.
Args:
page: the PDF page.
images: 0 - ignore images, 1 - remove complete overlapping image,
2 - blank out overlapping image parts.
"""
def center_rect(annot_rect, text, font, fsize):
"""Calculate minimal sub-rectangle for the overlay text.
Notes:
Because 'insert_textbox' supports no vertical text centering,
we calculate an approximate number of lines here and return a
sub-rect with smaller height, which should still be sufficient.
Args:
annot_rect: the annotation rectangle
text: the text to insert.
font: the fontname. Must be one of the CJK or Base-14 set, else
the rectangle is returned unchanged.
fsize: the fontsize
Returns:
A rectangle to use instead of the annot rectangle.
"""
if not text:
return annot_rect
try:
text_width = get_text_length(text, font, fsize)
except ValueError: # unsupported font
return annot_rect
line_height = fsize * 1.2
limit = annot_rect.width
h = math.ceil(text_width / limit) * line_height # estimate rect height
if h >= annot_rect.height:
return annot_rect
r = annot_rect
y = (annot_rect.tl.y + annot_rect.bl.y - h) * 0.5
r.y0 = y
return r
CheckParent(page)
doc = page.parent
if doc.is_encrypted or doc.is_closed:
raise ValueError("document closed or encrypted")
if not doc.is_pdf:
raise ValueError("is no PDF")
redact_annots = [] # storage of annot values
for annot in page.annots(types=(PDF_ANNOT_REDACT,)): # loop redactions
redact_annots.append(annot._get_redact_values()) # save annot values
if redact_annots == []: # any redactions on this page?
return False # no redactions
rc = page._apply_redactions(images) # call MuPDF redaction process step
if not rc: # should not happen really
raise ValueError("Error applying redactions.")
# now write replacement text in old redact rectangles
shape = page.new_shape()
for redact in redact_annots:
annot_rect = redact["rect"]
fill = redact["fill"]
if fill:
shape.draw_rect(annot_rect) # colorize the rect background
shape.finish(fill=fill, color=fill)
if "text" in redact.keys(): # if we also have text
text = redact["text"]
align = redact.get("align", 0)
fname = redact["fontname"]
fsize = redact["fontsize"]
color = redact["text_color"]
# try finding vertical centered sub-rect
trect = center_rect(annot_rect, text, fname, fsize)
rc = -1
while rc < 0 and fsize >= 4: # while not enough room
# (re-) try insertion
rc = shape.insert_textbox(
trect,
text,
fontname=fname,
fontsize=fsize,
color=color,
align=align,
)
fsize -= 0.5 # reduce font if unsuccessful
shape.commit() # append new contents object
return True
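# A minimal usage sketch, assuming an existing "page" with some content:
# mark an area with a redaction annotation, then apply it; images=2
# blanks out only the overlapping image parts:
#
#   page.add_redact_annot(fitz.Rect(100, 100, 300, 120), text="REDACTED")
#   page.apply_redactions(images=2)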
# ------------------------------------------------------------------------------
# Remove potentially sensitive data from a PDF. Similar to the Adobe
# Acrobat 'sanitize' function
# ------------------------------------------------------------------------------
def scrub(
doc: Document,
attached_files: bool = True,
clean_pages: bool = True,
embedded_files: bool = True,
hidden_text: bool = True,
javascript: bool = True,
metadata: bool = True,
redactions: bool = True,
redact_images: int = 0,
remove_links: bool = True,
reset_fields: bool = True,
reset_responses: bool = True,
thumbnails: bool = True,
xml_metadata: bool = True,
) -> None:
def remove_hidden(cont_lines):
"""Remove hidden text from a PDF page.
Args:
cont_lines: list of lines of /Contents content, in the state
after page.clean_contents().
Returns:
List of /Contents lines from which hidden text has been removed.
Notes:
The input must have been created after the page's /Contents object(s)
have been cleaned with page.clean_contents(). This ensures a standard
formatting: one command per line, single spaces between operators.
This allows for drastic simplification of this code.
"""
out_lines = [] # will return this
in_text = False # indicate if within BT/ET object
suppress = False # indicate text suppression active
make_return = False
for line in cont_lines:
if line == b"BT": # start of text object
in_text = True # switch on
out_lines.append(line) # output it
continue
if line == b"ET": # end of text object
in_text = False # switch off
out_lines.append(line) # output it
continue
if line == b"3 Tr": # text suppression operator
suppress = True # switch on
make_return = True
continue
if line[-2:] == b"Tr" and line[0] != b"3":
suppress = False # text rendering changed
out_lines.append(line)
continue
if line == b"Q": # unstack command also switches off
suppress = False
out_lines.append(line)
continue
if suppress and in_text: # suppress hidden lines
continue
out_lines.append(line)
if make_return:
return out_lines
else:
return None
if not doc.is_pdf: # only works for PDF
raise ValueError("is no PDF")
if doc.is_encrypted or doc.is_closed:
raise ValueError("closed or encrypted doc")
if clean_pages is False:
hidden_text = False
redactions = False
if metadata:
doc.set_metadata({}) # remove standard metadata
for page in doc:
if reset_fields:
# reset form fields (widgets)
for widget in page.widgets():
widget.reset()
if remove_links:
links = page.get_links() # list of all links on page
for link in links: # remove all links
page.delete_link(link)
found_redacts = False
for annot in page.annots():
if annot.type[0] == PDF_ANNOT_FILE_ATTACHMENT and attached_files:
annot.update_file(buffer=b" ") # set file content to empty
if reset_responses:
annot.delete_responses()
if annot.type[0] == PDF_ANNOT_REDACT:
found_redacts = True
if redactions and found_redacts:
page.apply_redactions(images=redact_images)
if not (clean_pages or hidden_text):
continue # done with the page
page.clean_contents()
if not page.get_contents():
continue
if hidden_text:
xref = page.get_contents()[0] # only one xref left after cleaning
cont = doc.xref_stream(xref)
cont_lines = remove_hidden(cont.splitlines()) # remove hidden text
if cont_lines: # something was actually removed
cont = b"\n".join(cont_lines)
doc.update_stream(xref, cont) # rewrite the page /Contents
if thumbnails: # remove page thumbnails?
if doc.xref_get_key(page.xref, "Thumb")[0] != "null":
doc.xref_set_key(page.xref, "Thumb", "null")
# pages are scrubbed, now perform document-wide scrubbing
# remove embedded files
if embedded_files:
for name in doc.embfile_names():
doc.embfile_del(name)
if xml_metadata:
doc.del_xml_metadata()
if not (xml_metadata or javascript):
xref_limit = 0
else:
xref_limit = doc.xref_length()
for xref in range(1, xref_limit):
if not doc.xref_object(xref):
msg = "bad xref %i - clean PDF before scrubbing" % xref
raise ValueError(msg)
if javascript and doc.xref_get_key(xref, "S")[1] == "/JavaScript":
# a /JavaScript action object
obj = "<</S/JavaScript/JS()>>" # replace with a null JavaScript
doc.update_object(xref, obj) # update this object
continue # no further handling
if not xml_metadata:
continue
if doc.xref_get_key(xref, "Type")[1] == "/Metadata":
# delete any metadata object directly
doc.update_object(xref, "<<>>")
doc.update_stream(xref, b"deleted", new=True)
continue
if doc.xref_get_key(xref, "Metadata")[0] != "null":
doc.xref_set_key(xref, "Metadata", "null")
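# A minimal usage sketch, assuming scrub() is bound as a Document method
# as in the classic API; file names are illustrative:
#
#   import fitz
#   doc = fitz.open("input.pdf")
#   doc.scrub(metadata=False, redact_images=2)  # keep standard metadata
#   doc.save("scrubbed.pdf", garbage=4, deflate=True)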
def fill_textbox(
writer: TextWriter,
rect: rect_like,
text: typing.Union[str, list],
pos: point_like = None,
font: typing.Optional[Font] = None,
fontsize: float = 11,
lineheight: OptFloat = None,
align: int = 0,
warn: bool = None,
right_to_left: bool = False,
small_caps: bool = False,
) -> tuple:
"""Fill a rectangle with text.
Args:
writer: TextWriter object (= "self")
rect: rect-like to receive the text.
text: string or list/tuple of strings.
pos: point-like start position of first word.
font: Font object (default Font('helv')).
fontsize: the fontsize.
lineheight: overwrite the font property
align: (int) 0 = left, 1 = center, 2 = right, 3 = justify
warn: (bool) text overflow action: none, warn, or exception
right_to_left: (bool) indicate right-to-left language.
"""
rect = Rect(rect)
if rect.is_empty:
raise ValueError("fill rect must not empty.")
if type(font) is not Font:
font = Font("helv")
def textlen(x):
"""Return length of a string."""
return font.text_length(
x, fontsize=fontsize, small_caps=small_caps
) # abbreviation
def char_lengths(x):
"""Return list of single character lengths for a string."""
return font.char_lengths(x, fontsize=fontsize, small_caps=small_caps)
def append_this(pos, text):
return writer.append(
pos, text, font=font, fontsize=fontsize, small_caps=small_caps
)
tolerance = fontsize * 0.2 # extra distance to left border
space_len = textlen(" ")
std_width = rect.width - tolerance
std_start = rect.x0 + tolerance
def norm_words(width, words):
"""Cut any word in pieces no longer than 'width'."""
nwords = []
word_lengths = []
for w in words:
wl_lst = char_lengths(w)
wl = sum(wl_lst)
if wl <= width: # nothing to do - copy over
nwords.append(w)
word_lengths.append(wl)
continue
# word longer than rect width - split it in parts
n = len(wl_lst)
while n > 0:
wl = sum(wl_lst[:n])
if wl <= width:
nwords.append(w[:n])
word_lengths.append(wl)
w = w[n:]
wl_lst = wl_lst[n:]
n = len(wl_lst)
else:
n -= 1
return nwords, word_lengths
def output_justify(start, line):
"""Justified output of a line."""
# ignore leading / trailing / multiple spaces
words = [w for w in line.split(" ") if w != ""]
nwords = len(words)
if nwords == 0:
return
if nwords == 1: # single word cannot be justified
append_this(start, words[0])
return
tl = sum([textlen(w) for w in words]) # total word lengths
gaps = nwords - 1 # number of word gaps
gapl = (std_width - tl) / gaps # width of each gap
for w in words:
_, lp = append_this(start, w) # output one word
start.x = lp.x + gapl # next start at word end plus gap
return
asc = font.ascender
dsc = font.descender
if not lineheight:
if asc - dsc <= 1:
lheight = 1.2
else:
lheight = asc - dsc
else:
lheight = lineheight
LINEHEIGHT = fontsize * lheight # effective line height
width = std_width # available horizontal space
# starting point of text
if pos is not None:
pos = Point(pos)
else: # default is just below rect top-left
pos = rect.tl + (tolerance, fontsize * asc)
if not pos in rect:
raise ValueError("Text must start in rectangle.")
# calculate displacement factor for alignment
if align == TEXT_ALIGN_CENTER:
factor = 0.5
elif align == TEXT_ALIGN_RIGHT:
factor = 1.0
else:
factor = 0
# split in lines if just a string was given
if type(text) is str:
textlines = text.splitlines()
else:
textlines = []
for line in text:
textlines.extend(line.splitlines())
max_lines = int((rect.y1 - pos.y) / LINEHEIGHT) + 1
new_lines = [] # the final list of textbox lines
no_justify = [] # no justify for these line numbers
for i, line in enumerate(textlines):
if line in ("", " "):
new_lines.append((line, space_len))
width = rect.width - tolerance
no_justify.append((len(new_lines) - 1))
continue
if i == 0:
width = rect.x1 - pos.x
else:
width = rect.width - tolerance
if right_to_left: # reverses Arabic / Hebrew text front to back
line = writer.clean_rtl(line)
tl = textlen(line)
if tl <= width: # line short enough
new_lines.append((line, tl))
no_justify.append((len(new_lines) - 1))
continue
# we need to split the line in fitting parts
words = line.split(" ") # the words in the line
# cut in parts any words that are longer than rect width
words, word_lengths = norm_words(std_width, words)
n = len(words)
while True:
line0 = " ".join(words[:n])
wl = sum(word_lengths[:n]) + space_len * (len(word_lengths[:n]) - 1)
if wl <= width:
new_lines.append((line0, wl))
words = words[n:]
word_lengths = word_lengths[n:]
n = len(words)
line0 = None
else:
n -= 1
if len(words) == 0:
break
# -------------------------------------------------------------------------
# List of lines created. Each item is (text, tl), where 'tl' is the PDF
# output length (float) and 'text' is the text. Except for justified text,
# this is output-ready.
# -------------------------------------------------------------------------
nlines = len(new_lines)
if nlines > max_lines:
msg = "Only fitting %i of %i lines." % (max_lines, nlines)
if warn == True:
print("Warning: " + msg)
elif warn == False:
raise ValueError(msg)
start = Point()
no_justify += [len(new_lines) - 1] # no justifying of last line
for i in range(max_lines):
try:
line, tl = new_lines.pop(0)
except IndexError:
break
if right_to_left: # Arabic, Hebrew
line = "".join(reversed(line))
if i == 0: # may have different start for first line
start = pos
if align == TEXT_ALIGN_JUSTIFY and i not in no_justify and tl < std_width:
output_justify(start, line)
start.x = std_start
start.y += LINEHEIGHT
continue
if i > 0 or pos.x == std_start: # left, center, right alignments
start.x += (width - tl) * factor
append_this(start, line)
start.x = std_start
start.y += LINEHEIGHT
return new_lines # return non-written lines
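# A minimal usage sketch, assuming fill_textbox is bound as a TextWriter
# method as in the classic API; the return value lists the lines that
# did not fit:
#
#   import fitz
#   page = fitz.open().new_page()
#   tw = fitz.TextWriter(page.rect)
#   leftover = tw.fill_textbox(fitz.Rect(100, 100, 300, 300), "some text",
#                              font=fitz.Font("helv"), fontsize=12)
#   tw.write_text(page)  # transfer the buffered text to the page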
# ------------------------------------------------------------------------
# Optional Content functions
# ------------------------------------------------------------------------
def get_oc(doc: Document, xref: int) -> int:
"""Return optional content object xref for an image or form xobject.
Args:
xref: (int) xref number of an image or form xobject.
"""
if doc.is_closed or doc.is_encrypted:
raise ValueError("document close or encrypted")
t, name = doc.xref_get_key(xref, "Subtype")
if t != "name" or name not in ("/Image", "/Form"):
raise ValueError("bad object type at xref %i" % xref)
t, oc = doc.xref_get_key(xref, "OC")
if t != "xref":
return 0
rc = int(oc.replace("0 R", ""))
return rc
def set_oc(doc: Document, xref: int, oc: int) -> None:
"""Attach optional content object to image or form xobject.
Args:
xref: (int) xref number of an image or form xobject
oc: (int) xref number of an OCG or OCMD
"""
if doc.is_closed or doc.is_encrypted:
raise ValueError("document close or encrypted")
t, name = doc.xref_get_key(xref, "Subtype")
if t != "name" or name not in ("/Image", "/Form"):
raise ValueError("bad object type at xref %i" % xref)
if oc > 0:
t, name = doc.xref_get_key(oc, "Type")
if t != "name" or name not in ("/OCG", "/OCMD"):
raise ValueError("bad object type at xref %i" % oc)
if oc == 0 and "OC" in doc.xref_get_keys(xref):
doc.xref_set_key(xref, "OC", "null")
return None
doc.xref_set_key(xref, "OC", "%i 0 R" % oc)
return None
def set_ocmd(
doc: Document,
xref: int = 0,
ocgs: typing.Union[list, None] = None,
policy: OptStr = None,
ve: typing.Union[list, None] = None,
) -> int:
"""Create or update an OCMD object in a PDF document.
Args:
xref: (int) 0 for creating a new object, otherwise update existing one.
ocgs: (list) OCG xref numbers, which shall be subject to 'policy'.
policy: one of 'AllOn', 'AllOff', 'AnyOn', 'AnyOff' (any casing).
ve: (list) visibility expression. Use instead of 'ocgs' with 'policy'.
Returns:
Xref of the created or updated OCMD.
"""
all_ocgs = set(doc.get_ocgs().keys())
def ve_maker(ve):
if type(ve) not in (list, tuple) or len(ve) < 2:
raise ValueError("bad 've' format: %s" % ve)
if ve[0].lower() not in ("and", "or", "not"):
raise ValueError("bad operand: %s" % ve[0])
if ve[0].lower() == "not" and len(ve) != 2:
raise ValueError("bad 've' format: %s" % ve)
item = "[/%s" % ve[0].title()
for x in ve[1:]:
if type(x) is int:
if x not in all_ocgs:
raise ValueError("bad OCG %i" % x)
item += " %i 0 R" % x
else:
item += " %s" % ve_maker(x)
item += "]"
return item
text = "<</Type/OCMD"
if ocgs and type(ocgs) in (list, tuple): # some OCGs are provided
s = set(ocgs).difference(all_ocgs) # contains illegal xrefs
if s != set():
msg = "bad OCGs: %s" % s
raise ValueError(msg)
text += "/OCGs[" + " ".join(map(lambda x: "%i 0 R" % x, ocgs)) + "]"
if policy:
policy = str(policy).lower()
pols = {
"anyon": "AnyOn",
"allon": "AllOn",
"anyoff": "AnyOff",
"alloff": "AllOff",
}
if policy not in ("anyon", "allon", "anyoff", "alloff"):
raise ValueError("bad policy: %s" % policy)
text += "/P/%s" % pols[policy]
if ve:
text += "/VE%s" % ve_maker(ve)
text += ">>"
# make new object or replace old OCMD (check type first)
if xref == 0:
xref = doc.get_new_xref()
elif "/Type/OCMD" not in doc.xref_object(xref, compressed=True):
raise ValueError("bad xref or not an OCMD")
doc.update_object(xref, text)
return xref
def get_ocmd(doc: Document, xref: int) -> dict:
"""Return the definition of an OCMD (optional content membership dictionary).
Recognizes PDF dict keys /OCGs (PDF array of OCGs), /P (policy string) and
/VE (visibility expression, PDF array). Via string manipulation, this
info is converted to a Python dictionary with keys "xref", "ocgs", "policy"
and "ve" - ready to recycle as input for 'set_ocmd()'.
"""
if xref not in range(doc.xref_length()):
raise ValueError("bad xref")
text = doc.xref_object(xref, compressed=True)
if "/Type/OCMD" not in text:
raise ValueError("bad object type")
textlen = len(text)
p0 = text.find("/OCGs[") # look for /OCGs key
p1 = text.find("]", p0)
if p0 < 0 or p1 < 0: # no OCGs found
ocgs = None
else:
ocgs = text[p0 + 6 : p1].replace("0 R", " ").split()
ocgs = list(map(int, ocgs))
p0 = text.find("/P/") # look for /P policy key
if p0 < 0:
policy = None
else:
p1 = text.find("ff", p0)
if p1 < 0:
p1 = text.find("on", p0)
if p1 < 0: # some irregular syntax
raise ValueError("bad object at xref")
else:
policy = text[p0 + 3 : p1 + 2]
p0 = text.find("/VE[") # look for /VE visibility expression key
if p0 < 0: # no visibility expression found
ve = None
else:
lp = rp = 0 # find end of /VE by finding last ']'.
p1 = p0
while lp < 1 or lp != rp:
p1 += 1
if not p1 < textlen: # some irregular syntax
raise ValueError("bad object at xref")
if text[p1] == "[":
lp += 1
if text[p1] == "]":
rp += 1
# p1 now positioned at the last "]"
ve = text[p0 + 3 : p1 + 1] # the PDF /VE array
ve = (
ve.replace("/And", '"and",')
.replace("/Not", '"not",')
.replace("/Or", '"or",')
)
ve = ve.replace(" 0 R]", "]").replace(" 0 R", ",").replace("][", "],[")
try:
ve = json.loads(ve)
except:
print("bad /VE key: ", ve)
raise
return {"xref": xref, "ocgs": ocgs, "policy": policy, "ve": ve}
"""
Handle page labels for PDF documents.
Reading
-------
* compute the label of a page
* find page number(s) having the given label.
Writing
-------
Supports setting (defining) page labels for PDF documents.
A big Thank You goes to WILLIAM CHAPMAN who contributed the idea and
significant parts of the following code during late December 2020
through early January 2021.
"""
def rule_dict(item):
"""Make a Python dict from a PDF page label rule.
Args:
item -- a tuple (pno, rule) with the start page number and the rule
string like <</S/D...>>.
Returns:
A dict like
{'startpage': int, 'prefix': str, 'style': str, 'firstpagenum': int}.
"""
# Jorj McKie, 2021-01-06
pno, rule = item
rule = rule[2:-2].split("/")[1:] # strip "<<" and ">>"
d = {"startpage": pno, "prefix": "", "firstpagenum": 1}
skip = False
for i, item in enumerate(rule):
if skip: # this item has already been processed
skip = False # deactivate skipping again
continue
if item == "S": # style specification
d["style"] = rule[i + 1] # next item has the style
skip = True # do not process next item again
continue
if item.startswith("P"): # prefix specification: extract the string
x = item[1:].replace("(", "").replace(")", "")
d["prefix"] = x
continue
if item.startswith("St"): # start page number specification
x = int(item[2:])
d["firstpagenum"] = x
return d
def get_label_pno(pgNo, labels):
"""Return the label for this page number.
Args:
pgNo: page number, 0-based.
labels: result of doc._get_page_labels().
Returns:
The label (str) of the page number. Errors return an empty string.
"""
# Jorj McKie, 2021-01-06
item = [x for x in labels if x[0] <= pgNo][-1]
rule = rule_dict(item)
prefix = rule.get("prefix", "")
style = rule.get("style", "")
pagenumber = pgNo - rule["startpage"] + rule["firstpagenum"]
return construct_label(style, prefix, pagenumber)
def get_label(page):
"""Return the label for this PDF page.
Args:
page: page object.
Returns:
The label (str) of the page. Errors return an empty string.
"""
# Jorj McKie, 2021-01-06
labels = page.parent._get_page_labels()
if not labels:
return ""
labels.sort()
return get_label_pno(page.number, labels)
def get_page_numbers(doc, label, only_one=False):
"""Return a list of page numbers with the given label.
Args:
doc: PDF document object (resp. 'self').
label: (str) label.
only_one: (bool) stop searching after first hit.
Returns:
List of page numbers having this label.
"""
# Jorj McKie, 2021-01-06
numbers = []
if not label:
return numbers
labels = doc._get_page_labels()
if labels == []:
return numbers
for i in range(doc.page_count):
plabel = get_label_pno(i, labels)
if plabel == label:
numbers.append(i)
if only_one:
break
return numbers
def construct_label(style, prefix, pno) -> str:
"""Construct a label based on style, prefix and page number."""
# William Chapman, 2021-01-06
n_str = ""
if style == "D":
n_str = str(pno)
elif style == "r":
n_str = integerToRoman(pno).lower()
elif style == "R":
n_str = integerToRoman(pno).upper()
elif style == "a":
n_str = integerToLetter(pno).lower()
elif style == "A":
n_str = integerToLetter(pno).upper()
result = prefix + n_str
return result
def integerToLetter(i) -> str:
"""Returns letter sequence string for integer i."""
# William Chapman, Jorj McKie, 2021-01-06
ls = string.ascii_uppercase
n, a = 1, i
while pow(26, n) <= a:
a -= int(math.pow(26, n))
n += 1
str_t = ""
for j in reversed(range(n)):
f, g = divmod(a, int(math.pow(26, j)))
str_t += ls[f]
a = g
return str_t
def integerToRoman(num: int) -> str:
"""Return roman numeral for an integer."""
# William Chapman, Jorj McKie, 2021-01-06
roman = (
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
)
def roman_num(num):
for r, ltr in roman:
x, _ = divmod(num, r)
yield ltr * x
num -= r * x
if num <= 0:
break
return "".join([a for a in roman_num(num)])
def get_page_labels(doc):
"""Return page label definitions in PDF document.
Args:
doc: PDF document (resp. 'self').
Returns:
A list of dictionaries with the following format:
{'startpage': int, 'prefix': str, 'style': str, 'firstpagenum': int}.
"""
# Jorj McKie, 2021-01-10
return [rule_dict(item) for item in doc._get_page_labels()]
def set_page_labels(doc, labels):
"""Add / replace page label definitions in PDF document.
Args:
doc: PDF document (resp. 'self').
labels: list of label dictionaries like:
{'startpage': int, 'prefix': str, 'style': str, 'firstpagenum': int},
as returned by get_page_labels().
"""
# William Chapman, 2021-01-06
def create_label_str(label):
"""Convert Python label dict to correspnding PDF rule string.
Args:
label: (dict) build rule for the label.
Returns:
PDF label rule string wrapped in "<<", ">>".
"""
s = "%i<<" % label["startpage"]
if label.get("prefix", "") != "":
s += "/P(%s)" % label["prefix"]
if label.get("style", "") != "":
s += "/S/%s" % label["style"]
if label.get("firstpagenum", 1) > 1:
s += "/St %i" % label["firstpagenum"]
s += ">>"
return s
def create_nums(labels):
"""Return concatenated string of all labels rules.
Args:
labels: (list) dictionaries as created by function 'rule_dict'.
Returns:
PDF compatible string for page label definitions, ready to be
enclosed in PDF array 'Nums[...]'.
"""
labels.sort(key=lambda x: x["startpage"])
s = "".join([create_label_str(label) for label in labels])
return s
doc._set_page_labels(create_nums(labels))
# End of Page Label Code -------------------------------------------------
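# A minimal usage sketch, assuming the classic API binds these functions
# as Document / Page methods; the file name is illustrative. Define
# lowercase-roman front matter followed by decimal page numbers:
#
#   import fitz
#   doc = fitz.open("input.pdf")
#   doc.set_page_labels([
#       {"startpage": 0, "prefix": "", "style": "r", "firstpagenum": 1},
#       {"startpage": 4, "prefix": "", "style": "D", "firstpagenum": 1},
#   ])
#   doc[0].get_label()        # -> "i"
#   doc.get_page_numbers("2") # -> [5] (0-based pages labeled "2")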
def has_links(doc: Document) -> bool:
"""Check whether there are links on any page."""
if doc.is_closed:
raise ValueError("document closed")
if not doc.is_pdf:
raise ValueError("is no PDF")
for i in range(doc.page_count):
for item in doc.page_annot_xrefs(i):
if item[1] == PDF_ANNOT_LINK:
return True
return False
def has_annots(doc: Document) -> bool:
"""Check whether there are annotations on any page."""
if doc.is_closed:
raise ValueError("document closed")
if not doc.is_pdf:
raise ValueError("is no PDF")
for i in range(doc.page_count):
for item in doc.page_annot_xrefs(i):
if not (item[1] == PDF_ANNOT_LINK or item[1] == PDF_ANNOT_WIDGET):
return True
return False
# -------------------------------------------------------------------
# Functions to recover the quad contained in a text extraction bbox
# -------------------------------------------------------------------
def recover_bbox_quad(line_dir: tuple, span: dict, bbox: tuple) -> Quad:
"""Compute the quad located inside the bbox.
The bbox may be any of the resp. tuples occurring inside the given span.
Args:
line_dir: (tuple) 'line["dir"]' of the owning line or None.
span: (dict) the span. May be from get_texttrace() method.
bbox: (tuple) the bbox of the span or any of its characters.
Returns:
The quad which is wrapped by the bbox.
"""
if line_dir == None:
line_dir = span["dir"]
cos, sin = line_dir
bbox = Rect(bbox) # make it a rect
if TOOLS.set_small_glyph_heights(): # ==> just fontsize as height
d = 1
else:
d = span["ascender"] - span["descender"]
height = d * span["size"] # the quad's rectangle height
# The following are distances from the bbox corners, at which we find the
# respective quad points. The computation depends on the quadrant in which
# the text writing angle is located.
hs = height * sin
hc = height * cos
if hc >= 0 and hs <= 0: # quadrant 1
ul = bbox.bl - (0, hc)
ur = bbox.tr + (hs, 0)
ll = bbox.bl - (hs, 0)
lr = bbox.tr + (0, hc)
elif hc <= 0 and hs <= 0: # quadrant 2
ul = bbox.br + (hs, 0)
ur = bbox.tl - (0, hc)
ll = bbox.br + (0, hc)
lr = bbox.tl - (hs, 0)
elif hc <= 0 and hs >= 0: # quadrant 3
ul = bbox.tr - (0, hc)
ur = bbox.bl + (hs, 0)
ll = bbox.tr - (hs, 0)
lr = bbox.bl + (0, hc)
else: # quadrant 4
ul = bbox.tl + (hs, 0)
ur = bbox.br - (0, hc)
ll = bbox.tl + (0, hc)
lr = bbox.br - (hs, 0)
return Quad(ul, ur, ll, lr)
def recover_quad(line_dir: tuple, span: dict) -> Quad:
"""Recover the quadrilateral of a text span.
Args:
line_dir: (tuple) 'line["dir"]' of the owning line.
span: the span.
Returns:
The quadrilateral enveloping the span's text.
"""
if type(line_dir) is not tuple or len(line_dir) != 2:
raise ValueError("bad line dir argument")
if type(span) is not dict:
raise ValueError("bad span argument")
return recover_bbox_quad(line_dir, span, span["bbox"])
def recover_line_quad(line: dict, spans: list = None) -> Quad:
"""Calculate the line quad for 'dict' / 'rawdict' text extractions.
The lower quad points are those of the first, resp. last span quad.
The upper points are determined by the maximum span quad height.
From this, compute a rect with bottom-left in (0, 0), convert this to a
quad and rotate and shift back to cover the text of the spans.
Args:
spans: (list, optional) sub-list of spans to consider.
Returns:
Quad covering selected spans.
"""
if spans == None: # no sub-selection
spans = line["spans"] # all spans
if len(spans) == 0:
raise ValueError("bad span list")
line_dir = line["dir"] # text direction
cos, sin = line_dir
q0 = recover_quad(line_dir, spans[0]) # quad of first span
if len(spans) > 1: # get quad of last span
q1 = recover_quad(line_dir, spans[-1])
else:
q1 = q0 # last = first
line_ll = q0.ll # lower-left of line quad
line_lr = q1.lr # lower-right of line quad
mat0 = planish_line(line_ll, line_lr)
# map base line to x-axis such that line_ll goes to (0, 0)
x_lr = line_lr * mat0
small = TOOLS.set_small_glyph_heights() # small glyph heights?
h = max(
[s["size"] * (1 if small else (s["ascender"] - s["descender"])) for s in spans]
)
line_rect = Rect(0, -h, x_lr.x, 0) # line rectangle
line_quad = line_rect.quad # make it a quad and:
    line_quad *= ~mat0  # rotate back and shift back
return line_quad
def recover_span_quad(line_dir: tuple, span: dict, chars: list = None) -> Quad:
"""Calculate the span quad for 'dict' / 'rawdict' text extractions.
Notes:
There are two execution paths:
1. For the full span quad, the result of 'recover_quad' is returned.
2. For the quad of a sub-list of characters, the char quads are
computed and joined. This is only supported for the "rawdict"
extraction option.
Args:
line_dir: (tuple) 'line["dir"]' of the owning line.
span: (dict) the span.
chars: (list, optional) sub-list of characters to consider.
Returns:
Quad covering selected characters.
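    Example:
        A sketch selecting the first three characters of a span. This needs
        the "rawdict" extraction; all names are illustrative:
            line = page.get_text("rawdict")["blocks"][0]["lines"][0]
            span = line["spans"][0]
            quad = recover_span_quad(line["dir"], span, chars=span["chars"][:3])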
"""
    if line_dir is None:  # must be a span from get_texttrace()
        line_dir = span["dir"]
    if chars is None:  # no sub-selection
return recover_quad(line_dir, span)
    if "chars" not in span:
raise ValueError("need 'rawdict' option to sub-select chars")
q0 = recover_char_quad(line_dir, span, chars[0]) # quad of first char
if len(chars) > 1: # get quad of last char
q1 = recover_char_quad(line_dir, span, chars[-1])
else:
q1 = q0 # last = first
span_ll = q0.ll # lower-left of span quad
span_lr = q1.lr # lower-right of span quad
mat0 = planish_line(span_ll, span_lr)
# map base line to x-axis such that span_ll goes to (0, 0)
x_lr = span_lr * mat0
small = TOOLS.set_small_glyph_heights() # small glyph heights?
h = span["size"] * (1 if small else (span["ascender"] - span["descender"]))
    span_rect = Rect(0, -h, x_lr.x, 0)  # span rectangle
span_quad = span_rect.quad # make it a quad and:
span_quad *= ~mat0 # rotate back and shift back
return span_quad
def recover_char_quad(line_dir: tuple, span: dict, char: dict) -> Quad:
"""Recover the quadrilateral of a text character.
This requires the "rawdict" option of text extraction.
Args:
line_dir: (tuple) 'line["dir"]' of the span's line.
span: (dict) the span dict.
char: (dict) the character dict.
Returns:
The quadrilateral enveloping the character.
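    Example:
        A sketch for a single character of a "rawdict" span (illustrative):
            char = span["chars"][0]
            quad = recover_char_quad(line["dir"], span, char)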
"""
    if line_dir is None:
line_dir = span["dir"]
if type(line_dir) is not tuple or len(line_dir) != 2:
raise ValueError("bad line dir argument")
if type(span) is not dict:
raise ValueError("bad span argument")
if type(char) is dict:
bbox = Rect(char["bbox"])
elif type(char) is tuple:
bbox = Rect(char[3])
else:
        raise ValueError("bad char argument")
return recover_bbox_quad(line_dir, span, bbox)
# -------------------------------------------------------------------
# Building font subsets using fontTools
# -------------------------------------------------------------------
def subset_fonts(doc: Document, verbose: bool = False) -> int:
"""Build font subsets of a PDF. Requires package 'fontTools'.
Eligible fonts are potentially replaced by smaller versions. Page text is
NOT rewritten and thus should retain properties like being hidden or
controlled by optional content.
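    Example:
        A minimal usage sketch - the file names are placeholders:
            doc = fitz.open("input.pdf")
            saved = subset_fonts(doc, verbose=True)  # bytes saved, may be 0
            doc.ez_save("output.pdf")  # garbage collection drops old fonts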
"""
    # Font binaries: "buffer" -> (names, xrefs, (unicodes, glyphs)).
    # An embedded font is uniquely defined by its font buffer only. It may
    # have multiple names and xrefs.
    # Once the sets of used unicodes and glyphs are known, we compute a
    # smaller version of the buffer using package fontTools.
font_buffers = {}
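    # Example shape (illustrative values only):
    #   font_buffers[b"<font file bytes>"] = (
    #       {"Helvetica", "SomeName"},  # name_set
    #       {123, 456},  # xref_set
    #       ({65, 66}, {36, 37}),  # (unicode set, glyph id set)
    #   )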
def get_old_widths(xref):
"""Retrieve old font '/W' and '/DW' values."""
df = doc.xref_get_key(xref, "DescendantFonts")
if df[0] != "array": # only handle xref specifications
return None, None
df_xref = int(df[1][1:-1].replace("0 R", ""))
widths = doc.xref_get_key(df_xref, "W")
if widths[0] != "array": # no widths key found
widths = None
else:
widths = widths[1]
dwidths = doc.xref_get_key(df_xref, "DW")
if dwidths[0] != "int":
dwidths = None
else:
dwidths = dwidths[1]
return widths, dwidths
def set_old_widths(xref, widths, dwidths):
"""Restore the old '/W' and '/DW' in subsetted font.
If either parameter is None or evaluates to False, the corresponding
dictionary key will be set to null.
"""
df = doc.xref_get_key(xref, "DescendantFonts")
if df[0] != "array": # only handle xref specs
return None
df_xref = int(df[1][1:-1].replace("0 R", ""))
        # only write a value if it is a non-empty string - otherwise make
        # sure the key is set to null (avoids passing None to xref_set_key)
        if type(widths) is str and widths:
            doc.xref_set_key(df_xref, "W", widths)
        elif doc.xref_get_key(df_xref, "W")[0] != "null":
            doc.xref_set_key(df_xref, "W", "null")
        if type(dwidths) is str and dwidths:
            doc.xref_set_key(df_xref, "DW", dwidths)
        elif doc.xref_get_key(df_xref, "DW")[0] != "null":
            doc.xref_set_key(df_xref, "DW", "null")
return None
def set_subset_fontname(new_xref):
"""Generate a name prefix to tag a font as subset.
We use a random generator to select 6 upper case ASCII characters.
The prefixed name must be put in the font xref as the "/BaseFont" value
and in the FontDescriptor object as the '/FontName' value.
"""
# The following generates a prefix like 'ABCDEF+'
prefix = "".join(random.choices(tuple(string.ascii_uppercase), k=6)) + "+"
font_str = doc.xref_object(new_xref, compressed=True)
font_str = font_str.replace("/BaseFont/", "/BaseFont/" + prefix)
df = doc.xref_get_key(new_xref, "DescendantFonts")
if df[0] == "array":
df_xref = int(df[1][1:-1].replace("0 R", ""))
fd = doc.xref_get_key(df_xref, "FontDescriptor")
if fd[0] == "xref":
fd_xref = int(fd[1].replace("0 R", ""))
fd_str = doc.xref_object(fd_xref, compressed=True)
fd_str = fd_str.replace("/FontName/", "/FontName/" + prefix)
doc.update_object(fd_xref, fd_str)
doc.update_object(new_xref, font_str)
return None
def build_subset(buffer, unc_set, gid_set):
"""Build font subset using fontTools.
Args:
buffer: (bytes) the font given as a binary buffer.
            unc_set: (set) required unicodes.
            gid_set: (set) required glyph ids.
Returns:
Either None if subsetting is unsuccessful or the subset font buffer.
"""
try:
import fontTools.subset as fts
except ImportError:
print("This method requires fontTools to be installed.")
raise
tmp_dir = tempfile.gettempdir()
oldfont_path = f"{tmp_dir}/oldfont.ttf"
newfont_path = f"{tmp_dir}/newfont.ttf"
uncfile_path = f"{tmp_dir}/uncfile.txt"
args = [
oldfont_path,
"--retain-gids",
f"--output-file={newfont_path}",
"--layout-features='*'",
"--passthrough-tables",
"--ignore-missing-glyphs",
"--ignore-missing-unicodes",
"--symbol-cmap",
]
        unc_file = open(uncfile_path, "w")  # store glyph ids or unicodes as file
if 0xFFFD in unc_set: # error unicode exists -> use glyphs
args.append(f"--gids-file={uncfile_path}")
gid_set.add(189)
unc_list = list(gid_set)
for unc in unc_list:
unc_file.write("%i\n" % unc)
else:
args.append(f"--unicodes-file={uncfile_path}")
unc_set.add(255)
unc_list = list(unc_set)
for unc in unc_list:
unc_file.write("%04x\n" % unc)
unc_file.close()
fontfile = open(oldfont_path, "wb") # store fontbuffer as a file
fontfile.write(buffer)
fontfile.close()
try:
os.remove(newfont_path) # remove old file
        except OSError:
pass
try: # invoke fontTools subsetter
fts.main(args)
font = Font(fontfile=newfont_path)
new_buffer = font.buffer
if len(font.valid_codepoints()) == 0:
new_buffer = None
        except Exception:
new_buffer = None
        try:
            os.remove(uncfile_path)
        except OSError:
            pass
        try:
            os.remove(oldfont_path)
        except OSError:
            pass
        try:
            os.remove(newfont_path)
        except OSError:
            pass
return new_buffer
def repl_fontnames(doc):
"""Populate 'font_buffers'.
For each font candidate, store its xref and the list of names
by which PDF text may refer to it (there may be multiple).
"""
def norm_name(name):
"""Recreate font name that contains PDF hex codes.
E.g. #20 -> space, chr(32)
"""
while "#" in name:
p = name.find("#")
c = int(name[p + 1 : p + 3], 16)
name = name.replace(name[p : p + 3], chr(c))
return name
def get_fontnames(doc, item):
"""Return a list of fontnames for an item of page.get_fonts().
There may be multiple names e.g. for Type0 fonts.
"""
fontname = item[3]
names = [fontname]
fontname = doc.xref_get_key(item[0], "BaseFont")[1][1:]
fontname = norm_name(fontname)
if fontname not in names:
names.append(fontname)
descendents = doc.xref_get_key(item[0], "DescendantFonts")
if descendents[0] != "array":
return names
descendents = descendents[1][1:-1]
if descendents.endswith(" 0 R"):
xref = int(descendents[:-4])
descendents = doc.xref_object(xref, compressed=True)
            p1 = descendents.find("/BaseFont")
            if p1 >= 0:
                p2 = descendents.find("/", p1 + 1)
                # the name ends at the next key or at the end of the dict;
                # ignore "not found" (-1) results of find()
                ends = [
                    p
                    for p in (
                        descendents.find("/", p2 + 1),
                        descendents.find(">>", p2 + 1),
                    )
                    if p > 0
                ]
                p1 = min(ends)
                fontname = descendents[p2 + 1 : p1]
fontname = norm_name(fontname)
if fontname not in names:
names.append(fontname)
return names
for i in range(doc.page_count):
for f in doc.get_page_fonts(i, full=True):
font_xref = f[0] # font xref
font_ext = f[1] # font file extension
basename = f[3] # font basename
if font_ext not in ( # skip if not supported by fontTools
"otf",
"ttf",
"woff",
"woff2",
):
continue
# skip fonts which already are subsets
if len(basename) > 6 and basename[6] == "+":
continue
extr = doc.extract_font(font_xref)
fontbuffer = extr[-1]
names = get_fontnames(doc, f)
name_set, xref_set, subsets = font_buffers.get(
fontbuffer, (set(), set(), (set(), set()))
)
xref_set.add(font_xref)
for name in names:
name_set.add(name)
font = Font(fontbuffer=fontbuffer)
name_set.add(font.name)
del font
font_buffers[fontbuffer] = (name_set, xref_set, subsets)
return None
def find_buffer_by_name(name):
for buffer in font_buffers.keys():
name_set, _, _ = font_buffers[buffer]
if name in name_set:
return buffer
return None
# -----------------
# main function
# -----------------
repl_fontnames(doc) # populate font information
if not font_buffers: # nothing found to do
if verbose:
print("No fonts to subset.")
return 0
old_fontsize = 0
new_fontsize = 0
for fontbuffer in font_buffers.keys():
old_fontsize += len(fontbuffer)
# Scan page text for usage of subsettable fonts
for page in doc:
# go through the text and extend set of used glyphs by font
        # we use a modified MuPDF trace device, which delivers the glyph ids.
for span in page.get_texttrace():
if type(span) is not dict: # skip useless information
continue
fontname = span["font"][:33] # fontname for the span
buffer = find_buffer_by_name(fontname)
if buffer is None:
continue
name_set, xref_set, (set_ucs, set_gid) = font_buffers[buffer]
for c in span["chars"]:
set_ucs.add(c[0]) # unicode
set_gid.add(c[1]) # glyph id
font_buffers[buffer] = (name_set, xref_set, (set_ucs, set_gid))
# build the font subsets
for old_buffer in font_buffers.keys():
name_set, xref_set, subsets = font_buffers[old_buffer]
new_buffer = build_subset(old_buffer, subsets[0], subsets[1])
fontname = list(name_set)[0]
        if new_buffer is None or len(new_buffer) >= len(old_buffer):
            # subset was not created or did not get smaller
            if verbose:
                print(f"Cannot subset '{fontname}'.")
            new_fontsize += len(old_buffer)  # unchanged font keeps its size
            continue
if verbose:
print(f"Built subset of font '{fontname}'.")
val = doc._insert_font(fontbuffer=new_buffer) # store subset font in PDF
new_xref = val[0] # get its xref
set_subset_fontname(new_xref) # tag fontname as subset font
font_str = doc.xref_object( # get its object definition
new_xref,
compressed=True,
)
# walk through the original font xrefs and replace each by the subset def
for font_xref in xref_set:
# we need the original '/W' and '/DW' width values
width_table, def_width = get_old_widths(font_xref)
# ... and replace original font definition at xref with it
doc.update_object(font_xref, font_str)
# now copy over old '/W' and '/DW' values
if width_table or def_width:
set_old_widths(font_xref, width_table, def_width)
# 'new_xref' remains unused in the PDF and must be removed
# by garbage collection.
new_fontsize += len(new_buffer)
return old_fontsize - new_fontsize
# -------------------------------------------------------------------
# Copy XREF object to another XREF
# -------------------------------------------------------------------
def xref_copy(doc: Document, source: int, target: int, *, keep: list = None) -> None:
"""Copy a PDF dictionary object to another one given their xref numbers.
Args:
doc: PDF document object
source: source xref number
target: target xref number, the xref must already exist
keep: an optional list of 1st level keys in target that should not be
removed before copying.
Notes:
        This works similarly to the copy() method of dictionaries in Python. The
source may be a stream object.
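    Example:
        A sketch overwriting one page object with another one, keeping the
        target's /Parent entry (xref numbers depend on the document):
            src = doc[0].xref  # xref of the first page
            tgt = doc[1].xref  # xref of the second page
            xref_copy(doc, src, tgt, keep=["Parent"])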
"""
if doc.xref_is_stream(source):
# read new xref stream, maintaining compression
stream = doc.xref_stream_raw(source)
doc.update_stream(
target,
stream,
compress=False, # keeps source compression
new=True, # in case target is no stream
)
    # empty the target completely, except for the keys listed in 'keep'
if keep is None:
keep = []
for key in doc.xref_get_keys(target):
if key in keep:
continue
doc.xref_set_key(target, key, "null")
# copy over all source dict items
for key in doc.xref_get_keys(source):
item = doc.xref_get_key(source, key)
doc.xref_set_key(target, key, item[1])
return None
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.treemap.marker.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.treema
p.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
treemap.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means every 2nd
label is shown. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.treemap.marker.colorbar.Ti
tle` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.treemap.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.treemap.marker.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.treema
p.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
treemap.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means every 2nd
label is shown. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.treemap.marker.colorbar.Ti
tle` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.treemap.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
ColorBar
|
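The tick-placement properties above interact: with tickmode="array", the
colorbar draws ticks at `tickvals` and labels them with `ticktext`, while
`ticks`/`ticklen` control the tick marks themselves. A minimal sketch,
assuming plotly is installed; the labels and values are made up for
illustration.
import plotly.graph_objects as go
fig = go.Figure(
    go.Treemap(
        labels=["root", "a", "b"],
        parents=["", "root", "root"],
        values=[10, 6, 4],
        marker=dict(
            colors=[10, 6, 4],                    # drives the colorbar scale
            colorbar=dict(
                tickmode="array",                 # place ticks explicitly
                tickvals=[4, 6, 10],
                ticktext=["low", "mid", "high"],
                ticks="outside",                  # draw tick marks outside
                ticklen=8,
            ),
        ),
    )
)
fig.show()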
python
|
modin-project__modin
|
modin/pandas/io.py
|
{
"start": 30834,
"end": 41184
}
|
class ____(ClassLogger, pandas.ExcelFile): # noqa: PR01, D200
"""
Class for parsing tabular excel sheets into DataFrame objects.
"""
_behave_like_pandas = False
def _set_pandas_mode(self): # noqa
# disable Modin behavior to be able to pass object to `pandas.read_excel`
# otherwise, Modin objects may be passed to the pandas context, resulting
# in undefined behavior
self._behave_like_pandas = True
def __getattribute__(self, item):
if item in ["_set_pandas_mode", "_behave_like_pandas"]:
return object.__getattribute__(self, item)
default_behaviors = ["__init__", "__class__"]
method = super(ExcelFile, self).__getattribute__(item)
if not self._behave_like_pandas and item not in default_behaviors:
if callable(method):
def return_handler(*args, **kwargs):
"""
Convert Modin arguments to pandas, call the pandas method, and wrap the result.
Returns
-------
A Modin DataFrame in place of a pandas DataFrame, or the same
return type as pandas.ExcelFile.
Notes
-----
This function will replace all of the arguments passed to
methods of ExcelFile with the pandas equivalent. It will convert
Modin DataFrame to pandas DataFrame, etc.
"""
# We don't want to constantly be giving this error message for
# internal methods.
if item[0] != "_":
_maybe_warn_on_default("`{}`".format(item))
args = [
(
to_pandas(arg)
if isinstance(arg, ModinObjects.DataFrame)
else arg
)
for arg in args
]
kwargs = {
k: to_pandas(v) if isinstance(v, ModinObjects.DataFrame) else v
for k, v in kwargs.items()
}
obj = super(ExcelFile, self).__getattribute__(item)(*args, **kwargs)
if isinstance(obj, pandas.DataFrame):
return ModinObjects.DataFrame(obj)
return obj
# We replace the method with `return_handler` so arguments and results are converted.
method = return_handler
return method
@wrap_free_function_in_argument_caster("from_non_pandas")
def from_non_pandas(df, index, columns, dtype) -> DataFrame | None:
"""
Convert a non-pandas DataFrame into Modin DataFrame.
Parameters
----------
df : object
Non-pandas DataFrame.
index : object
Index for non-pandas DataFrame.
columns : object
Columns for non-pandas DataFrame.
dtype : type
Data type to force.
Returns
-------
modin.pandas.DataFrame
Converted DataFrame.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
new_qc = FactoryDispatcher.from_non_pandas(df, index, columns, dtype)
if new_qc is not None:
return ModinObjects.DataFrame(query_compiler=new_qc)
return new_qc
@wrap_free_function_in_argument_caster("from_pandas")
def from_pandas(df) -> DataFrame:
"""
Convert a pandas DataFrame to a Modin DataFrame.
Parameters
----------
df : pandas.DataFrame
The pandas DataFrame to convert.
Returns
-------
modin.pandas.DataFrame
A new Modin DataFrame object.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return ModinObjects.DataFrame(query_compiler=FactoryDispatcher.from_pandas(df))
@wrap_free_function_in_argument_caster("from_arrow")
def from_arrow(at) -> DataFrame:
"""
Convert an Arrow Table to a Modin DataFrame.
Parameters
----------
at : Arrow Table
The Arrow Table to convert from.
Returns
-------
DataFrame
A new Modin DataFrame object.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return ModinObjects.DataFrame(query_compiler=FactoryDispatcher.from_arrow(at))
@wrap_free_function_in_argument_caster("from_dataframe")
def from_dataframe(df: ProtocolDataframe) -> DataFrame:
"""
Convert a DataFrame implementing the dataframe interchange protocol to a Modin DataFrame.
See more about the protocol in https://data-apis.org/dataframe-protocol/latest/index.html.
Parameters
----------
df : ProtocolDataframe
An object supporting the dataframe interchange protocol.
Returns
-------
DataFrame
A new Modin DataFrame object.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return ModinObjects.DataFrame(
query_compiler=FactoryDispatcher.from_interchange_dataframe(df)
)
@wrap_free_function_in_argument_caster("from_ray")
def from_ray(ray_obj) -> DataFrame:
"""
Convert a Ray Dataset into Modin DataFrame.
Parameters
----------
ray_obj : ray.data.Dataset
The Ray Dataset to convert from.
Returns
-------
DataFrame
A new Modin DataFrame object.
Notes
-----
Ray Dataset can only be converted to Modin DataFrame if Modin uses a Ray engine.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return ModinObjects.DataFrame(query_compiler=FactoryDispatcher.from_ray(ray_obj))
@wrap_free_function_in_argument_caster("from_dask")
def from_dask(dask_obj) -> DataFrame:
"""
Convert a Dask DataFrame to a Modin DataFrame.
Parameters
----------
dask_obj : dask.dataframe.DataFrame
The Dask DataFrame to convert from.
Returns
-------
DataFrame
A new Modin DataFrame object.
Notes
-----
Dask DataFrame can only be converted to Modin DataFrame if Modin uses a Dask engine.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return ModinObjects.DataFrame(query_compiler=FactoryDispatcher.from_dask(dask_obj))
@wrap_free_function_in_argument_caster("from_map")
def from_map(func, iterable, *args, **kwargs) -> DataFrame:
"""
Create a Modin DataFrame from map function applied to an iterable object.
This method will construct a Modin DataFrame split by row partitions.
The number of row partitions matches the number of elements in the iterable object.
Parameters
----------
func : callable
Function to map across the iterable object.
iterable : Iterable
An iterable object.
*args : tuple
Positional arguments to pass in `func`.
**kwargs : dict
Keyword arguments to pass in `func`.
Returns
-------
DataFrame
A new Modin DataFrame object.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return ModinObjects.DataFrame(
query_compiler=FactoryDispatcher.from_map(func, iterable, *args, **kwargs)
)
@wrap_free_function_in_argument_caster("to_pandas")
def to_pandas(modin_obj: SupportsPublicToPandas) -> DataFrame | Series:
"""
Convert a Modin DataFrame/Series to a pandas DataFrame/Series.
Parameters
----------
modin_obj : modin.DataFrame, modin.Series
The Modin DataFrame/Series to convert.
Returns
-------
pandas.DataFrame or pandas.Series
Converted object with type depending on input.
"""
return modin_obj._to_pandas()
@wrap_free_function_in_argument_caster("to_numpy")
def to_numpy(
modin_obj: Union[SupportsPrivateToNumPy, SupportsPublicToNumPy],
) -> np.ndarray:
"""
Convert a Modin object to a NumPy array.
Parameters
----------
modin_obj : modin.DataFrame, modin.Series, modin.numpy.array
The Modin distributed object to convert.
Returns
-------
numpy.array
Converted object with type depending on input.
"""
if isinstance(modin_obj, SupportsPrivateToNumPy):
return modin_obj._to_numpy()
array = modin_obj.to_numpy()
if ModinNumpy.get():
array = array._to_numpy()
return array
@wrap_free_function_in_argument_caster("to_ray")
def to_ray(modin_obj):
"""
Convert a Modin DataFrame/Series to a Ray Dataset.
Parameters
----------
modin_obj : modin.pandas.DataFrame, modin.pandas.Series
The DataFrame/Series to convert.
Returns
-------
ray.data.Dataset
Converted object with type depending on input.
Notes
-----
Modin DataFrame/Series can only be converted to a Ray Dataset if Modin uses a Ray engine.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return FactoryDispatcher.to_ray(modin_obj)
@wrap_free_function_in_argument_caster("to_dask")
def to_dask(modin_obj):
"""
Convert a Modin DataFrame/Series to a Dask DataFrame/Series.
Parameters
----------
modin_obj : modin.pandas.DataFrame, modin.pandas.Series
The Modin DataFrame/Series to convert.
Returns
-------
dask.dataframe.DataFrame or dask.dataframe.Series
Converted object with type depending on input.
Notes
-----
Modin DataFrame/Series can only be converted to a Dask DataFrame/Series if Modin uses a Dask engine.
"""
from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher
return FactoryDispatcher.to_dask(modin_obj)
__all__ = [
"ExcelFile",
"HDFStore",
"json_normalize",
"read_clipboard",
"read_csv",
"read_excel",
"read_feather",
"read_fwf",
"read_gbq",
"read_hdf",
"read_html",
"read_json",
"read_orc",
"read_parquet",
"read_pickle",
"read_sas",
"read_spss",
"read_sql",
"read_sql_query",
"read_sql_table",
"read_stata",
"read_table",
"read_xml",
"from_non_pandas",
"from_pandas",
"from_arrow",
"from_dataframe",
"to_pickle",
"to_pandas",
"to_numpy",
]
|
ExcelFile
|
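A minimal round-trip sketch of the from_pandas/to_pandas helpers documented
above, assuming Modin is installed with a working engine; the frame contents
are arbitrary.
import pandas
import modin.pandas as pd
from modin.pandas.io import from_pandas, to_pandas
pdf = pandas.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
mdf = from_pandas(pdf)                # distribute the pandas frame
assert isinstance(mdf, pd.DataFrame)
back = to_pandas(mdf)                 # collect it back into plain pandas
assert pdf.equals(back)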
python
|
hynek__structlog
|
tests/test_dev.py
|
{
"start": 26550,
"end": 33886
}
|
class ____:
def test_level_styles_roundtrip(self):
"""
The level_styles property can be set and retrieved.
"""
cr = dev.ConsoleRenderer(colors=True)
custom = {"info": "X", "error": "Y"}
cr.level_styles = custom
assert cr.level_styles is custom
assert cr._level_styles is custom
@pytest.mark.parametrize("colors", [True, False])
def test_set_level_styles_none_resets_to_defaults(self, colors):
"""
Setting level_styles to None resets to defaults.
"""
cr = dev.ConsoleRenderer(colors=colors)
cr.level_styles = {"info": "X"}
cr.level_styles = None
assert (
dev.ConsoleRenderer.get_default_level_styles(colors=colors)
== cr._level_styles
)
def test_roundtrip_pad_level(self):
"""
The pad_level property can be set and retrieved.
"""
cr = dev.ConsoleRenderer(pad_level=True)
assert cr.pad_level is True
assert cr._pad_level is True
cr.pad_level = False
assert cr.pad_level is False
assert cr._pad_level is False
cr.pad_level = True
assert cr.pad_level is True
assert cr._pad_level is True
def test_roundtrip_pad_event_to(self):
"""
The pad_event_to property can be set and retrieved.
"""
cr = dev.ConsoleRenderer()
assert cr.pad_event_to == dev._EVENT_WIDTH
assert cr._pad_event_to == dev._EVENT_WIDTH
cr.pad_event_to = 50
assert cr.pad_event_to == 50
assert cr._pad_event_to == 50
cr.pad_event_to = 20
assert cr.pad_event_to == 20
assert cr._pad_event_to == 20
def test_repr_native_str_property(self, cr):
"""
The repr_native_str property can be set and retrieved, and affects formatting.
"""
cr = dev.ConsoleRenderer(colors=False, repr_native_str=False)
assert False is cr.repr_native_str
assert "event key=plain" == cr(
None, None, {"event": "event", "key": "plain"}
)
cr.repr_native_str = True
assert "event key='plain'" == cr(
None, None, {"event": "event", "key": "plain"}
)
def test_pad_event_deprecation_warning(self, recwarn):
"""
Using pad_event argument raises a deprecation warning.
"""
dev.ConsoleRenderer(pad_event=42)
(w,) = recwarn.list
assert (
"The `pad_event` argument is deprecated. Use `pad_event_to` instead."
) == w.message.args[0]
assert w.category is DeprecationWarning
def test_pad_event_to_param_raises_value_error(self):
"""
Using pad_event_to and pad_event raises a ValueError.
"""
with pytest.raises(ValueError): # noqa: PT011
dev.ConsoleRenderer(pad_event_to=42, pad_event=42)
def test_same_value_resets_level_styles(self, cr):
"""
Setting colors to the same value resets the level styles to the
defaults.
"""
val = cr.colors
cr._level_styles = {"info": "X", "error": "Y"}
cr.colors = cr.colors
assert val is cr.colors
assert (
cr._level_styles
== dev.ConsoleRenderer.get_default_level_styles(colors=val)
)
@pytest.mark.skipif(
dev._IS_WINDOWS and dev.colorama is None,
reason="Toggling colors=True requires colorama on Windows",
)
def test_toggle_colors_updates_styles_and_levels(self):
"""
Toggling colors updates the styles and level styles to colorful styles.
"""
cr = dev.ConsoleRenderer(colors=False)
assert cr.colors is False
assert cr._colors is False
assert cr._styles is dev._plain_styles
assert (
cr._level_styles
== dev.ConsoleRenderer.get_default_level_styles(colors=False)
)
cr.colors = True
assert cr.colors is True
assert cr._colors is True
assert cr._styles is dev._colorful_styles
assert (
cr._level_styles
== dev.ConsoleRenderer.get_default_level_styles(colors=True)
)
@pytest.mark.skipif(
dev._IS_WINDOWS and dev.colorama is None,
reason="Toggling colors=True requires colorama on Windows",
)
def test_toggle_colors_resets_custom_level_styles(self):
"""
Toggling colors resets the level styles to the defaults for the new
color setting.
"""
custom = {"info": "X", "error": "Y"}
cr = dev.ConsoleRenderer(colors=False, level_styles=custom)
assert custom == cr._level_styles
cr.colors = True
assert (
dev.ConsoleRenderer.get_default_level_styles(colors=True)
== cr._level_styles
)
# And switching back follows defaults for the new setting again
cr.colors = False
assert (
dev.ConsoleRenderer.get_default_level_styles(colors=False)
== cr._level_styles
)
def test_same_force_colors_value_resets_level_styles(self, cr):
"""
Setting force_colors to the same value resets the level styles to the
defaults.
"""
val = cr.force_colors
cr._level_styles = {"info": "X", "error": "Y"}
cr.force_colors = cr.force_colors
assert val is cr.force_colors
assert (
cr._level_styles
== dev.ConsoleRenderer.get_default_level_styles(colors=cr.colors)
)
def test_toggle_force_colors_updates_styles_and_levels(self):
"""
Toggling force_colors updates the styles and level styles for the
new setting.
"""
cr = dev.ConsoleRenderer(colors=True, force_colors=False)
assert cr.force_colors is False
assert cr._force_colors is False
assert cr._styles is dev.ConsoleRenderer.get_default_column_styles(
colors=True, force_colors=False
)
assert (
cr._level_styles
== dev.ConsoleRenderer.get_default_level_styles(colors=True)
)
cr.force_colors = True
assert cr.force_colors is True
assert cr._force_colors is True
assert cr._styles is dev.ConsoleRenderer.get_default_column_styles(
colors=True, force_colors=True
)
assert (
cr._level_styles
== dev.ConsoleRenderer.get_default_level_styles(colors=True)
)
def test_toggle_force_colors_resets_custom_level_styles(self):
"""
Toggling force_colors resets the level styles to the defaults for the
new force_colors setting.
"""
custom = {"info": "X", "error": "Y"}
cr = dev.ConsoleRenderer(colors=True, level_styles=custom)
assert custom == cr._level_styles
cr.force_colors = True
assert (
dev.ConsoleRenderer.get_default_level_styles(colors=True)
== cr._level_styles
)
cr.force_colors = False
assert (
dev.ConsoleRenderer.get_default_level_styles(colors=True)
== cr._level_styles
)
|
TestConsoleRendererProperties
|
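A minimal sketch of the mutable renderer properties these tests exercise,
assuming a structlog version that exposes them as settable properties
(colors=True additionally needs colorama on Windows, as the skip markers
note).
from structlog.dev import ConsoleRenderer
cr = ConsoleRenderer(colors=False)
print(cr(None, None, {"event": "hello", "key": "value"}))  # plain text
cr.pad_event_to = 40   # widen the event column
cr.colors = True       # swaps styles and resets level styles to defaults
print(cr(None, None, {"event": "hello", "key": "value"}))  # colored output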
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/axislines.py
|
{
"start": 6536,
"end": 8756
}
|
class ____(_FloatingAxisArtistHelperBase):
def __init__(self, axes, nth_coord,
passingthrough_point, axis_direction="bottom"):
super().__init__(nth_coord, passingthrough_point)
self._axis_direction = axis_direction
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
def get_line(self, axes):
fixed_coord = 1 - self.nth_coord
data_to_axes = axes.transData - axes.transAxes
p = data_to_axes.transform([self._value, self._value])
return Path(self._to_xy((0, 1), const=p[fixed_coord]))
def get_line_transform(self, axes):
return axes.transAxes
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
Return the label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
angle = [0, 90][self.nth_coord]
fixed_coord = 1 - self.nth_coord
data_to_axes = axes.transData - axes.transAxes
p = data_to_axes.transform([self._value, self._value])
verts = self._to_xy(0.5, const=p[fixed_coord])
return (verts, angle) if 0 <= verts[fixed_coord] <= 1 else (None, None)
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
angle_normal, angle_tangent = {0: (90, 0), 1: (0, 90)}[self.nth_coord]
major = self.axis.major
major_locs = major.locator()
major_labels = major.formatter.format_ticks(major_locs)
minor = self.axis.minor
minor_locs = minor.locator()
minor_labels = minor.formatter.format_ticks(minor_locs)
data_to_axes = axes.transData - axes.transAxes
def _f(locs, labels):
for loc, label in zip(locs, labels):
c = self._to_xy(loc, const=self._value)
c1, c2 = data_to_axes.transform(c)
if 0 <= c1 <= 1 and 0 <= c2 <= 1:
yield c, angle_normal, angle_tangent, label
return _f(major_locs, major_labels), _f(minor_locs, minor_labels)
|
FloatingAxisArtistHelperRectilinear
|
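The helper above repeatedly builds `axes.transData - axes.transAxes`. In
Matplotlib, subtracting transforms composes the first with the inverse of
the second, so the result maps data coordinates to axes-fraction
coordinates. A minimal sketch, assuming Matplotlib is installed.
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.set_xlim(0, 10)
ax.set_ylim(0, 100)
data_to_axes = ax.transData - ax.transAxes
print(data_to_axes.transform((5, 50)))  # ~[0.5, 0.5]: the axes midpoint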
python
|
google__pytype
|
pytype/pytd/booleq.py
|
{
"start": 1865,
"end": 3124
}
|
class ____(BooleanTerm):
"""Class for representing "FALSE"."""
def simplify(self, assignments):
return self
def __repr__(self):
return "FALSE"
def __str__(self):
return "FALSE"
def extract_pivots(self, assignments):
return {}
def extract_equalities(self):
return ()
TRUE = TrueValue()
FALSE = FalseValue()
def simplify_exprs(exprs, result_type, stop_term, skip_term):
"""Simplify a set of subexpressions for a conjunction or disjunction.
Args:
exprs: An iterable. The subexpressions.
result_type: _And or _Or. The type of result (unless it simplifies down to
something simpler).
stop_term: FALSE for _And, TRUE for _Or. If this term is encountered, it
will be immediately returned.
skip_term: TRUE for _And, FALSE for _Or. If this term is encountered, it
will be ignored.
Returns:
A BooleanTerm.
"""
expr_set = set()
for e in exprs:
if e is stop_term:
return stop_term
elif e is skip_term:
continue
elif isinstance(e, result_type):
expr_set = expr_set.union(e.exprs)
else:
expr_set.add(e)
if len(expr_set) > 1:
return result_type(expr_set)
elif expr_set:
return expr_set.pop()
else:
return skip_term
|
FalseValue
|
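A self-contained sketch of the stop/skip logic in simplify_exprs above,
with plain strings standing in for BooleanTerm and frozenset standing in
for the _And/_Or result types; the nested-expression flattening branch is
omitted, and all names here are illustrative, not pytype's API.
def simplify(exprs, stop_term, skip_term):
    # For a conjunction: stop_term="FALSE" short-circuits the whole
    # expression, skip_term="TRUE" is the identity and is dropped.
    expr_set = set()
    for e in exprs:
        if e == stop_term:
            return stop_term
        if e == skip_term:
            continue
        expr_set.add(e)
    if len(expr_set) > 1:
        return frozenset(expr_set)
    return expr_set.pop() if expr_set else skip_term
print(simplify(["x", "TRUE", "y"], "FALSE", "TRUE"))   # frozenset({'x', 'y'})
print(simplify(["x", "FALSE", "y"], "FALSE", "TRUE"))  # FALSE
print(simplify(["TRUE"], "FALSE", "TRUE"))             # TRUE (empty And)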
python
|
getsentry__sentry
|
fixtures/sudo_testutils.py
|
{
"start": 496,
"end": 1012
}
|
class ____(TestCase):
def setUp(self):
self.request = self.get("/foo")
self.request.session = {}
self.setUser(AnonymousUser())
def get(self, *args, **kwargs):
return RequestFactory().get(*args, **kwargs)
def post(self, *args, **kwargs):
return RequestFactory().post(*args, **kwargs)
def setUser(self, user):
self.user = self.request.user = user
def login(self, user_class=User):
user = user_class()
self.setUser(user)
|
BaseTestCase
|
python
|
numpy__numpy
|
numpy/lib/tests/test_function_base.py
|
{
"start": 172740,
"end": 173631
}
|
class ____:
@pytest.mark.parametrize("type_in, type_out", [
('l', 'D'),
('h', 'F'),
('H', 'F'),
('b', 'F'),
('B', 'F'),
('g', 'G'),
])
def test_sort_real(self, type_in, type_out):
# sort_complex() type casting for real input types
a = np.array([5, 3, 6, 2, 1], dtype=type_in)
actual = np.sort_complex(a)
expected = np.sort(a).astype(type_out)
assert_equal(actual, expected)
assert_equal(actual.dtype, expected.dtype)
def test_sort_complex(self):
# sort_complex() handling of complex input
a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
actual = np.sort_complex(a)
assert_equal(actual, expected)
assert_equal(actual.dtype, expected.dtype)
|
TestSortComplex
|
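A quick sketch of the casting behavior those tests pin down, assuming NumPy
is installed: real integer input is promoted to a complex dtype, and complex
input is sorted by real part, then imaginary part.
import numpy as np
a = np.array([5, 3, 6, 2, 1], dtype="l")
print(np.sort_complex(a))        # [1.+0.j 2.+0.j 3.+0.j 5.+0.j 6.+0.j]
print(np.sort_complex(a).dtype)  # complex128 ('D')
b = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j])
print(np.sort_complex(b))        # [1.-3.j 1.-2.j 2.+1.j 2.+3.j]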
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/commands/integration/cloud/nios.py
|
{
"start": 1986,
"end": 2583
}
|
class ____(CloudEnvironment):
"""NIOS environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
ansible_vars = dict(
nios_provider=dict(
host=self._get_cloud_config('NIOS_HOST'),
username='admin',
password='infoblox',
),
)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
|
NiosEnvironment
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-array-pairs-divisible-by-k.py
|
{
"start": 152,
"end": 888
}
|
class ____(object):
def countPairs(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def gcd(x, y):
while y:
x, y = y, x%y
return x
cnt = collections.Counter()
for x in nums:
cnt[gcd(x, k)] += 1
result = 0
for x in cnt.iterkeys():
for y in cnt.iterkeys():
if x > y or x*y%k:
continue
result += cnt[x]*cnt[y] if x != y else cnt[x]*(cnt[x]-1)//2
return result
# Time: O(nlogk + n * sqrt(k))
# Space: O(sqrt(k)), number of factors of k is at most sqrt(k)
import collections
# math, number theory
|
Solution
|
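The solution above is written for Python 2 (`iterkeys`). A Python 3
rendering of the same gcd-bucketing idea, for reference: values are grouped
by gcd(value, k), so only pairs of gcd buckets need checking rather than
all pairs of values.
import collections
from math import gcd
def count_pairs(nums, k):
    cnt = collections.Counter(gcd(x, k) for x in nums)
    result = 0
    for x in cnt:
        for y in cnt:
            if x > y or (x * y) % k:
                continue
            result += cnt[x] * cnt[y] if x != y else cnt[x] * (cnt[x] - 1) // 2
    return result
print(count_pairs([1, 2, 3, 4, 5], 2))  # 7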
python
|
huggingface__transformers
|
src/transformers/models/mllama/modeling_mllama.py
|
{
"start": 9192,
"end": 11284
}
|
class ____(nn.Module):
def __init__(self, config: MllamaVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.attention_heads
self.head_dim = config.hidden_size // config.attention_heads
self.scaling = self.head_dim**-0.5
self.num_key_value_groups = 1
self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.embed_dim, bias=False)
def forward(
self,
hidden_state: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
query = self.q_proj(hidden_state)
key = self.k_proj(hidden_state)
value = self.v_proj(hidden_state)
batch_size, q_seq_len, _ = query.shape
_, kv_seq_len, _ = key.shape
query = query.view(batch_size, q_seq_len, self.num_heads, self.head_dim).transpose(1, 2)
key = key.view(batch_size, kv_seq_len, self.num_heads, self.head_dim).transpose(1, 2)
value = value.view(batch_size, kv_seq_len, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask,
dropout=0.0,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, q_seq_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
MllamaVisionAttention
|
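A minimal sketch of the view/transpose head-splitting step in the forward
pass above, assuming PyTorch is installed; the sizes are toy values.
import torch
batch, seq, hidden, heads = 1, 3, 8, 2
head_dim = hidden // heads
x = torch.randn(batch, seq, hidden)
q = x.view(batch, seq, heads, head_dim).transpose(1, 2)
print(q.shape)  # torch.Size([1, 2, 3, 4]) -> (batch, heads, seq, head_dim)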
python
|
apache__avro
|
lang/py/avro/errors.py
|
{
"start": 2263,
"end": 2391
}
|
class ____(AvroTypeException):
"""Raised when a default value isn't a suitable type for the schema."""
|
InvalidDefaultException
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/window_test.py
|
{
"start": 10652,
"end": 11241
}
|
class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self):
dataset = dataset_ops.Dataset.range(42).window(6).interleave(
lambda x: x, cycle_length=2, num_parallel_calls=2)
return dataset
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
verify_fn(self, self._build_dataset, num_outputs=42)
if __name__ == "__main__":
test.main()
|
WindowCheckpointTest
|
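A small sketch of the window-plus-interleave pipeline that _build_dataset
constructs above (minus the checkpointing harness), assuming TensorFlow is
installed. Each window is itself a dataset; interleave with an identity
function flattens the windows back into elements.
import tensorflow as tf
ds = tf.data.Dataset.range(12).window(3).interleave(
    lambda w: w, cycle_length=2)
print(list(ds.as_numpy_iterator()))
# [0, 3, 1, 4, 2, 5, 6, 9, 7, 10, 8, 11] with the default block_length=1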
python
|
mlflow__mlflow
|
mlflow/tensorflow/__init__.py
|
{
"start": 31536,
"end": 34406
}
|
class ____:
"""
Wrapper class that exposes a TensorFlow model for inference via a ``predict`` function such that
``predict(data: pandas.DataFrame) -> pandas.DataFrame``. For TensorFlow versions >= 2.0.0.
"""
def __init__(self, model, infer):
"""
Args:
model: A Tensorflow SavedModel.
infer: Tensorflow function returned by a saved model that is used for inference.
"""
# Note: we need to retain the model reference in TF2Wrapper object, because the infer
# function in tensorflow will be `ConcreteFunction` which only retains WeakRefs to the
# variables they close over.
# See https://www.tensorflow.org/guide/function#deleting_tfvariables_between_function_calls
self.model = model
self.infer = infer
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.model
def predict(
self,
data,
params: dict[str, Any] | None = None,
):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions.
"""
import tensorflow as tf
feed_dict = {}
if isinstance(data, dict):
feed_dict = {k: tf.constant(v) for k, v in data.items()}
elif isinstance(data, pandas.DataFrame):
for df_col_name in list(data):
# If there are multiple columns with the same name, selecting the shared name
# from the DataFrame will result in another DataFrame containing the columns
# with the shared name. TensorFlow cannot make eager tensors out of pandas
# DataFrames, so we convert the DataFrame to a numpy array here.
val = data[df_col_name]
val = val.values if isinstance(val, pandas.DataFrame) else np.array(val.to_list())
feed_dict[df_col_name] = tf.constant(val)
else:
raise TypeError("Only dict and DataFrame input types are supported")
raw_preds = self.infer(**feed_dict)
pred_dict = {col_name: raw_preds[col_name].numpy() for col_name in raw_preds.keys()}
for col in pred_dict.keys():
# If the output tensor is not 1-dimensional
# AND all elements have length of 1, flatten the array with `ravel()`
if len(pred_dict[col].shape) != 1 and all(
len(element) == 1 for element in pred_dict[col]
):
pred_dict[col] = pred_dict[col].ravel()
else:
pred_dict[col] = pred_dict[col].tolist()
if isinstance(data, dict):
return pred_dict
else:
return pandas.DataFrame.from_dict(data=pred_dict)
|
_TF2Wrapper
|
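A self-contained sketch of the output-flattening rule at the end of
predict() above: a multi-dimensional column whose rows all have length 1
is flattened with ravel(), anything else becomes a plain list.
import numpy as np
col = np.array([[1.0], [2.0], [3.0]])      # shape (3, 1): one value per row
if len(col.shape) != 1 and all(len(e) == 1 for e in col):
    col = col.ravel()
print(col)                                  # [1. 2. 3.]
wide = np.array([[1.0, 2.0], [3.0, 4.0]])   # rows longer than 1: keep shape
print(wide.tolist())                        # [[1.0, 2.0], [3.0, 4.0]]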
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classes1.py
|
{
"start": 778,
"end": 973
}
|
class ____(*args, **kwargs):
pass
def func1(x: type) -> object:
class Y(x):
pass
return Y()
# This should generate an error because a TypeVar can't be used as a base class.
|
J
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/workers.py
|
{
"start": 74459,
"end": 79130
}
|
class ____(Request):
"""
Register a worker in the system. Called by the Worker Daemon.
:param worker: Worker id. Must be unique in company.
:type worker: str
:param timeout: Registration timeout in seconds. If timeout seconds have passed
since the worker's last call to register or status_report, the worker is
automatically removed from the list of registered workers.
:type timeout: int
:param queues: List of queue IDs on which the worker is listening.
:type queues: Sequence[str]
:param tags: User tags for the worker
:type tags: Sequence[str]
:param system_tags: System tags for the worker
:type system_tags: Sequence[str]
"""
_service = "workers"
_action = "register"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"queues": {
"description": "List of queue IDs on which the worker is listening.",
"items": {"type": "string"},
"type": "array",
},
"system_tags": {
"description": "System tags for the worker",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User tags for the worker",
"items": {"type": "string"},
"type": "array",
},
"timeout": {
"default": 600,
"description": "Registration timeout in seconds. If timeout seconds have passed since the worker's last call to register or status_report, the worker is automatically removed from the list of registered workers.",
"type": "integer",
},
"worker": {
"description": "Worker id. Must be unique in company.",
"type": "string",
},
},
"required": ["worker"],
"type": "object",
}
def __init__(
self,
worker: str,
timeout: Optional[int] = 600,
queues: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(RegisterRequest, self).__init__(**kwargs)
self.worker = worker
self.timeout = timeout
self.queues = queues
self.tags = tags
self.system_tags = system_tags
@schema_property("worker")
def worker(self) -> str:
return self._property_worker
@worker.setter
def worker(self, value: str) -> None:
if value is None:
self._property_worker = None
return
self.assert_isinstance(value, "worker", six.string_types)
self._property_worker = value
@schema_property("timeout")
def timeout(self) -> Optional[int]:
return self._property_timeout
@timeout.setter
def timeout(self, value: Optional[int]) -> None:
if value is None:
self._property_timeout = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "timeout", six.integer_types)
self._property_timeout = value
@schema_property("queues")
def queues(self) -> Optional[List[str]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
self.assert_isinstance(value, "queues", six.string_types, is_array=True)
self._property_queues = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| RegisterRequest |
| python | kamyu104__LeetCode-Solutions | Python/check-if-every-row-and-column-contains-all-numbers.py | {"start": 443, "end": 862} |
class ____(object):
def checkValid(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
return all(reduce(lambda x, y: x^y, (matrix[i][j]^(j+1) for j in xrange(len(matrix[0])))) == 0 for i in xrange(len(matrix))) and \
all(reduce(lambda x, y: x^y, (matrix[i][j]^(i+1) for i in xrange(len(matrix)))) == 0 for j in xrange(len(matrix[0])))
| Solution_Wrong |
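A short counterexample showing why the XOR check above earns its `Solution_Wrong` name: a row can XOR-match 1..n without being a permutation (values assumed to lie in 1..n, as in the original problem).

from functools import reduce

row = [1, 2, 2, 3, 3]  # n = 5; not a permutation of 1..5
# the row check from above reduces to 0, so the wrong solution accepts it
assert reduce(lambda x, y: x ^ y, (v ^ (j + 1) for j, v in enumerate(row))) == 0
assert sorted(row) != [1, 2, 3, 4, 5]  # yet the row is invalid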
| python | geekcomputers__Python | binary_search_trees/tree_node.py | {"start": 59, "end": 227} |
class ____:
def __init__(self, data: int) -> None:
self.data: int = data
self.left: Optional[Node] = None
self.right: Optional[Node] = None
| Node |
| python | getsentry__sentry | src/sentry/replays/usecases/ingest/event_logger.py | {"start": 1748, "end": 9825} |
class ____(TypedDict):
payload: ReplayActionsEventClickPayload | ReplayActionsEventTapPayload
project_id: int
replay_id: str
retention_days: int
start_time: float
type: Literal["replay_event"]
@sentry_sdk.trace
def emit_tap_events(
tap_events: list[TapEvent],
project_id: int,
replay_id: str,
retention_days: int,
start_time: float,
event_cap: int = 20,
environment: str | None = None,
) -> None:
# Skip event emission if no taps specified.
if len(tap_events) == 0:
return None
taps: list[ReplayActionsEventPayloadTap] = [
{
"message": tap.message,
"view_id": tap.view_id,
"view_class": tap.view_class,
"timestamp": tap.timestamp,
"event_hash": encode_as_uuid(f"{replay_id}{tap.timestamp}{tap.view_id}"),
}
for tap in tap_events[:event_cap]
]
payload: ReplayActionsEventTapPayload = {
"environment": environment or "",
"replay_id": replay_id,
"type": "replay_tap",
"taps": taps,
}
action: ReplayActionsEvent = {
"project_id": project_id,
"replay_id": replay_id,
"retention_days": retention_days,
"start_time": start_time,
"type": "replay_event",
"payload": payload,
}
publish_replay_event(json.dumps(action))
@sentry_sdk.trace
def emit_click_events(
click_events: list[ClickEvent],
project_id: int,
replay_id: str,
retention_days: int,
start_time: float,
event_cap: int = 20,
environment: str | None = None,
) -> None:
# Skip event emission if no clicks specified.
if len(click_events) == 0:
return None
clicks: list[ReplayActionsEventPayloadClick] = [
{
"alt": click.alt,
"aria_label": click.aria_label,
"class": click.classes,
"component_name": click.component_name,
"event_hash": encode_as_uuid(f"{replay_id}{click.timestamp}{click.node_id}"),
"id": click.id,
"is_dead": click.is_dead,
"is_rage": click.is_rage,
"node_id": click.node_id,
"role": click.role,
"tag": click.tag,
"testid": click.testid,
"text": click.text,
"timestamp": click.timestamp,
"title": click.title,
}
for click in click_events[:event_cap]
]
payload: ReplayActionsEventClickPayload = {
"environment": environment or "",
"replay_id": replay_id,
"type": "replay_actions",
"clicks": clicks,
}
action: ReplayActionsEvent = {
"project_id": project_id,
"replay_id": replay_id,
"retention_days": retention_days,
"start_time": start_time,
"type": "replay_event",
"payload": payload,
}
publish_replay_event(json.dumps(action))
@sentry_sdk.trace
def emit_request_response_metrics(event_meta: ParsedEventMeta) -> None:
for sizes in event_meta.request_response_sizes:
req_size, res_size = sizes
if req_size:
metrics.distribution("replays.usecases.ingest.request_body_size", req_size, unit="byte")
if res_size:
metrics.distribution(
"replays.usecases.ingest.response_body_size", res_size, unit="byte"
)
@sentry_sdk.trace
def log_canvas_size(
event_meta: ParsedEventMeta, org_id: int, project_id: int, replay_id: str
) -> None:
for canvas_size in event_meta.canvas_sizes:
logger.info(
"sentry.replays.slow_click",
extra={
"event_type": "canvas_size",
"org_id": org_id,
"project_id": project_id,
"replay_id": replay_id,
"size": canvas_size,
},
)
@sentry_sdk.trace
def log_mutation_events(event_meta: ParsedEventMeta, project_id: int, replay_id: str) -> None:
# TODO: sampled differently from the rest (0 <= i <= 99)
# probably fine to ignore.
for mutation in event_meta.mutation_events:
log = mutation.payload.copy()
log["project_id"] = project_id
log["replay_id"] = replay_id
logger.info("Large DOM Mutations List:", extra=log)
@sentry_sdk.trace
def log_option_events(event_meta: ParsedEventMeta, project_id: int, replay_id: str) -> None:
for option in event_meta.options_events:
log = option["data"].get("payload", {}).copy()
log["project_id"] = project_id
log["replay_id"] = replay_id
logger.info("sentry.replays.slow_click", extra=log)
@sentry_sdk.trace
def log_multiclick_events(
event_meta: ParsedEventMeta,
project_id: int,
replay_id: str,
# Sample multiclick events at 0.2% rate
should_sample: Callable[[], bool] = lambda: random.random() < 0.002,
) -> None:
for multiclick in event_meta.multiclick_events:
if not should_sample():
continue
log = {
"event_type": "multi_click",
"project_id": project_id,
"replay_id": replay_id,
"alt": multiclick.click_event.alt,
"aria_label": multiclick.click_event.aria_label,
"classes": multiclick.click_event.classes,
"component_name": multiclick.click_event.component_name,
"id": multiclick.click_event.id,
"node_id": multiclick.click_event.node_id,
"role": multiclick.click_event.role,
"selector": multiclick.click_event.selector,
"tag": multiclick.click_event.tag,
"testid": multiclick.click_event.testid,
"text": multiclick.click_event.text[:100], # Truncate text for logging
"timestamp": multiclick.click_event.timestamp,
"url": multiclick.click_event.url or "",
"title": multiclick.click_event.title,
"click_count": multiclick.click_count,
}
logger.info("sentry.replays.slow_click", extra=log)
@sentry_sdk.trace
def log_rage_click_events(
event_meta: ParsedEventMeta,
project_id: int,
replay_id: str,
# Sample rage multiclick events at 0.2% rate
should_sample: Callable[[], bool] = lambda: random.random() < 0.002,
) -> None:
for click in event_meta.click_events:
if click.is_rage and should_sample():
log = {
"event_type": "rage_click",
"project_id": project_id,
"replay_id": replay_id,
"alt": click.alt,
"aria_label": click.aria_label,
"classes": click.classes,
"component_name": click.component_name,
"id": click.id,
"is_rage_click": True,
"is_dead_click": bool(click.is_dead),
"node_id": click.node_id,
"role": click.role,
"selector": click.selector,
"tag": click.tag,
"testid": click.testid,
"text": click.text[:100], # Truncate text for logging
"timestamp": click.timestamp,
"url": click.url or "",
"title": click.title,
}
logger.info("sentry.replays.slow_click", extra=log)
@sentry_sdk.trace
def report_hydration_error(
event_meta: ParsedEventMeta,
project_id: int,
replay_id: str,
replay_event: dict[str, Any] | None,
context: ProcessorContext,
) -> None:
metrics.incr("replay.hydration_error_breadcrumb", amount=len(event_meta.hydration_errors))
# Eagerly exit to prevent unnecessary I/O.
if (
len(event_meta.hydration_errors) == 0
or not replay_event
or not _should_report_hydration_error_issue(project_id, context)
):
return None
for error in event_meta.hydration_errors:
report_hydration_error_issue_with_replay_event(
project_id,
replay_id,
error.timestamp,
error.url,
replay_event,
)
| ReplayActionsEvent |
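The click/tap payloads above derive a deterministic `event_hash` from the replay id, timestamp, and node/view id via `encode_as_uuid`. A common way to build such a stable string-to-UUID helper is `uuid5`; this is an illustrative stand-in, not Sentry's actual implementation.

import uuid

def encode_as_uuid_sketch(value: str) -> str:
    # uuid5 is a deterministic, namespaced hash: same input, same UUID
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, value))

assert encode_as_uuid_sketch("replay123-0-42") == encode_as_uuid_sketch("replay123-0-42")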
| python | django-debug-toolbar__django-debug-toolbar | tests/test_toolbar.py | {"start": 148, "end": 557} |
class ____(BaseTestCase):
def test_empty_prefix_errors(self):
with self.assertRaises(ImproperlyConfigured):
debug_toolbar_urls(prefix="")
def test_empty_when_debug_is_false(self):
self.assertEqual(debug_toolbar_urls(), [])
def test_has_path(self):
with self.settings(DEBUG=True):
self.assertEqual(len(debug_toolbar_urls()), 1)
| DebugToolbarUrlsTestCase |
| python | getsentry__sentry | src/sentry/issues/endpoints/organization_group_search_view_details_starred.py | {"start": 1094, "end": 3060} |
class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ISSUES
permission_classes = (MemberPermission,)
def post(self, request: Request, organization: Organization, view_id: int) -> Response:
"""
Update the starred status of a group search view for the current organization member.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = StarViewSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
is_starred = serializer.validated_data["starred"]
try:
view = GroupSearchView.objects.get(id=view_id, organization=organization)
except GroupSearchView.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if not (
view.user_id == request.user.id
or view.visibility == GroupSearchViewVisibility.ORGANIZATION
):
return Response(status=status.HTTP_404_NOT_FOUND)
if is_starred:
insert_position = (
serializer.validated_data["position"]
if "position" in serializer.validated_data
else GroupSearchViewStarred.objects.num_starred_views(organization, request.user.id)
)
if GroupSearchViewStarred.objects.insert_starred_view(
organization, request.user.id, view, insert_position
):
return Response(status=status.HTTP_200_OK)
else:
if GroupSearchViewStarred.objects.delete_starred_view(
organization, request.user.id, view
):
return Response(status=status.HTTP_200_OK)
return Response(status=status.HTTP_204_NO_CONTENT)
| OrganizationGroupSearchViewDetailsStarredEndpoint |
| python | jazzband__django-oauth-toolkit | oauth2_provider/views/mixins.py | {"start": 9841, "end": 10978} |
class ____(OAuthLibMixin):
"""Mixin for protecting resources with client authentication as mentioned in rfc:`3.2.1`
This involves authenticating with any of: HTTP Basic Auth, Client Credentials and
Access token in that order. Breaks off after first validation.
"""
def dispatch(self, request, *args, **kwargs):
# let preflight OPTIONS requests pass
if request.method.upper() == "OPTIONS":
return super().dispatch(request, *args, **kwargs)
# Validate either with HTTP basic or client creds in request body.
# TODO: Restrict to POST.
valid = self.authenticate_client(request)
if not valid:
# Alternatively allow access tokens
# check if the request is valid and the protected resource may be accessed
valid, r = self.verify_request(request)
if valid:
request.resource_owner = r.user
return super().dispatch(request, *args, **kwargs)
return HttpResponseForbidden()
else:
return super().dispatch(request, *args, **kwargs)
| ClientProtectedResourceMixin |
| python | jazzband__tablib | tests/test_tablib.py | {"start": 43328, "end": 46324} |
class ____(BaseTestCase):
def test_xls_format_detect(self):
"""Test the XLS format detection."""
in_stream = self.founders.xls
self.assertEqual(detect_format(in_stream), 'xls')
def test_xls_date_import(self):
xls_source = Path(__file__).parent / 'files' / 'dates.xls'
with xls_source.open('rb') as fh:
dset = tablib.Dataset().load(fh, 'xls')
self.assertEqual(dset.dict[0]['birth_date'], dt.datetime(2015, 4, 12, 0, 0))
def test_xlsx_import_set_skip_lines(self):
data.append(('garbage', 'line', ''))
data.append(('', '', ''))
data.append(('id', 'name', 'description'))
_xls = data.xls
new_data = tablib.Dataset().load(_xls, skip_lines=2)
self.assertEqual(new_data.headers, ['id', 'name', 'description'])
def test_xls_import_with_errors(self):
"""Errors from imported files are kept as errors."""
xls_source = Path(__file__).parent / 'files' / 'errors.xls'
with xls_source.open('rb') as fh:
data = tablib.Dataset().load(fh.read())
self.assertEqual(
data.dict[0],
{
'div by 0': '#DIV/0!',
'name unknown': '#NAME?',
'not available (formula)': '#N/A',
'not available (static)': '#N/A',
}
)
def test_book_import_from_stream(self):
in_stream = self.founders.xls
book = tablib.Databook().load(in_stream, 'xls')
self.assertEqual(book.sheets()[0].title, 'Founders')
def test_xls_export_with_dates(self):
date = dt.date(2019, 10, 4)
time = dt.time(14, 30)
date_time = dt.datetime(2019, 10, 4, 12, 30, 8)
data.append((date, time, date_time))
data.headers = ('date', 'time', 'date/time')
_xls = data.xls
xls_book = xlrd.open_workbook(file_contents=_xls, formatting_info=True)
row = xls_book.sheet_by_index(0).row(1)
def get_format_str(cell):
return xls_book.format_map[xls_book.xf_list[cell.xf_index].format_key].format_str
self.assertEqual('m/d/yy', get_format_str(row[0]))
self.assertEqual('h:mm:ss', get_format_str(row[1]))
self.assertEqual('m/d/yy h:mm', get_format_str(row[2]))
def test_xls_bad_chars_sheet_name(self):
"""
Sheet names are limited to 30 chars and the following chars
are not permitted: \\ / * ? : [ ]
"""
_dataset = tablib.Dataset(
title='bad name \\/*?:[]qwertyuiopasdfghjklzxcvbnm'
)
_xls = _dataset.export('xls')
new_data = tablib.Dataset().load(_xls)
self.assertEqual(new_data.title, 'bad name -------qwertyuiopasdfg')
_book = tablib.Databook()
_book.add_sheet(_dataset)
_xls = _book.export('xls')
new_data = tablib.Databook().load(_xls, 'xls')
self.assertEqual(new_data.sheets()[0].title, 'bad name -------qwertyuiopasdfg')
| XLSTests |
| python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {"start": 130326, "end": 131538} |
class ____(Response):
"""
Response of tasks.delete_configuration endpoint.
:param deleted: Indicates if the task was updated successfully
:type deleted: int
"""
_service = "tasks"
_action = "delete_configuration"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, deleted: Optional[int] = None, **kwargs: Any) -> None:
super(DeleteConfigurationResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
| DeleteConfigurationResponse |
| python | PyCQA__pylint | tests/regrtest_data/hang/pkg4972/string/__init__.py | {"start": 62, "end": 126} |
class ____(string.Formatter):
pass
string.Formatter = Fake
| Fake |
| python | facebook__pyre-check | client/commands/infer.py | {"start": 3292, "end": 6080} |
class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
global_annotations: List[RawGlobalAnnotation] = dataclasses.field(
metadata=dataclasses_json.config(field_name="globals"), default_factory=list
)
attribute_annotations: List[RawAttributeAnnotation] = dataclasses.field(
metadata=dataclasses_json.config(field_name="attributes"), default_factory=list
)
define_annotations: List[RawDefineAnnotation] = dataclasses.field(
metadata=dataclasses_json.config(field_name="defines"), default_factory=list
)
class ParsingError(Exception):
pass
@staticmethod
def create_from_string(input: str) -> "RawInferOutput":
try:
# pyre-fixme[7]: Imprecise return type of `loads()`
return RawInferOutput.cached_schema().loads(input)
except (
TypeError,
KeyError,
ValueError,
dataclasses_json.mm.ValidationError,
) as error:
raise RawInferOutput.ParsingError(str(error)) from error
@staticmethod
def create_from_json(input: Dict[str, object]) -> "RawInferOutput":
return RawInferOutput.create_from_string(json.dumps(input))
def qualifiers_by_path(self) -> Dict[str, str]:
return {
annotation.location.path: annotation.location.qualifier
for annotation in itertools.chain(
self.global_annotations,
self.attribute_annotations,
self.define_annotations,
)
}
def split_by_path(self) -> "Dict[str, RawInferOutputForPath]":
def create_index(
annotations: Sequence[TAnnotation],
) -> Dict[str, List[TAnnotation]]:
result: Dict[str, List[TAnnotation]] = {}
for annotation in annotations:
key = annotation.location.path
result.setdefault(key, []).append(annotation)
return result
qualifiers_by_path = self.qualifiers_by_path()
global_annotation_index = create_index(self.global_annotations)
attribute_annotation_index = create_index(self.attribute_annotations)
define_annotation_index = create_index(self.define_annotations)
return {
path: RawInferOutputForPath(
global_annotations=global_annotation_index.get(path, []),
attribute_annotations=attribute_annotation_index.get(path, []),
define_annotations=define_annotation_index.get(path, []),
qualifier=qualifiers_by_path[path],
)
for path in global_annotation_index.keys()
| attribute_annotation_index.keys()
| define_annotation_index.keys()
}
@dataclasses.dataclass(frozen=True)
| RawInferOutput |
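`split_by_path` above relies on the `setdefault` grouping idiom inside `create_index`; the same pattern in isolation (names here are illustrative, not part of the record).

from typing import Callable, Dict, List, TypeVar

T = TypeVar("T")

def group_by(items: List[T], key: Callable[[T], str]) -> Dict[str, List[T]]:
    result: Dict[str, List[T]] = {}
    for item in items:
        # setdefault creates the bucket on first sight, then appends
        result.setdefault(key(item), []).append(item)
    return result

assert group_by(["a.py", "b.py", "a.pyi"], key=lambda p: p.split(".")[0]) == {
    "a": ["a.py", "a.pyi"],
    "b": ["b.py"],
}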
| python | huggingface__transformers | tests/kernels/test_kernels.py | {"start": 14485, "end": 16114} |
class ____(TestCasePlus):
@classmethod
def setUpClass(cls):
cls.model_id = "unsloth/Llama-3.2-1B-Instruct"
cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id, use_kernels=False, device_map=torch_device)
@classmethod
def tearDownClass(cls):
# Delete large objects to drop references early
if hasattr(cls, "model"):
try:
del cls.model
except Exception:
pass
def tearDown(self):
# Free accelerator memory/cache and trigger GC
cleanup(torch_device, gc_collect=True)
def test_setting_use_kernels_twice_does_not_rekernelize(self):
call_count = {"n": 0}
def spy_kernelize(*args, **kwargs):
call_count["n"] += 1
with patch.object(kernels_pkg, "kernelize", side_effect=spy_kernelize):
self.model.use_kernels = True
self.assertTrue(self.model.use_kernels)
self.assertEqual(call_count["n"], 1)
self.model.use_kernels = True
self.assertEqual(call_count["n"], 1)
def test_train_eval_calls_kernelize_with_correct_mode(self):
last_modes = []
def spy_kernelize(model, device=None, mode=None):
last_modes.append(mode)
with patch.object(kernels_pkg, "kernelize", side_effect=spy_kernelize):
self.model.use_kernels = True
self.model.train(True)
self.assertTrue(any(m == Mode.TRAINING for m in last_modes))
self.model.eval()
self.assertTrue(any(m == Mode.INFERENCE for m in last_modes))
| TestUseKernelsLifecycle |
| python | pytorch__pytorch | test/distributed/test_c10d_common.py | {"start": 2390, "end": 4307} |
class ____:
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "(?i)timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///{}".format(f.name.replace("\\", "/"))
f.close()
else:
yield f"file://{f.name}"
f.close()
yield f"tcp://127.0.0.1:{common.find_free_port():d}"
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError(f"Unexpected type {type(c2p[0])}")
| AbstractTimeoutTest |
| python | pytorch__pytorch | torch/ao/nn/qat/modules/conv.py | {"start": 3939, "end": 5753} |
class ____(_ConvNd, nn.Conv1d):
r"""
A Conv1d module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as :class:`~torch.nn.Conv1d`
Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
_FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: str | _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
qconfig=None,
device=None,
dtype=None,
) -> None:
kernel_size_ = _single(kernel_size)
stride_ = _single(stride)
padding_ = padding if isinstance(padding, str) else _single(padding)
dilation_ = _single(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride=stride_,
padding=padding_,
dilation=dilation_,
transposed=False,
output_padding=_single(0),
groups=groups,
bias=bias,
padding_mode=padding_mode,
qconfig=qconfig,
device=device,
dtype=dtype,
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
return super().from_float(
cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
| Conv1d |
| python | facebookresearch__faiss | tests/test_factory.py | {"start": 7232, "end": 7447} |
class ____(unittest.TestCase):
def test_1(self):
self.assertEqual(
factory_tools.get_code_size(50, "IVF32,Flat,Refine(PQ25x12)"),
50 * 4 + (25 * 12 + 7) // 8
)
| TestCodeSize |
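The expected value in `test_1` above is plain byte accounting for the factory string; a worked sketch of the arithmetic, assuming 50-dimensional float32 vectors.

flat_bytes = 50 * 4                    # Flat stores the 50-dim float32 vector: 200 bytes
refine_bits = 25 * 12                  # Refine(PQ25x12): 25 sub-codes of 12 bits each
refine_bytes = (refine_bits + 7) // 8  # round 300 bits up to whole bytes: 38
assert flat_bytes + refine_bytes == 238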
| python | great-expectations__great_expectations | great_expectations/core/partitioners.py | {"start": 1778, "end": 2313} |
class ____(pydantic.BaseModel):
column_name: str
sort_ascending: bool = True
method_name: Literal["partition_on_converted_datetime"] = "partition_on_converted_datetime"
date_format_string: str
ColumnPartitioner = Union[
PartitionerColumnValue,
PartitionerMultiColumnValue,
PartitionerDividedInteger,
PartitionerModInteger,
ColumnPartitionerYearly,
ColumnPartitionerMonthly,
ColumnPartitionerDaily,
PartitionerDatetimePart,
PartitionerConvertedDatetime,
]
| PartitionerConvertedDatetime |
| python | facelessuser__pymdown-extensions | tests/test_extensions/test_emoji.py | {"start": 1324, "end": 1895} |
class ____(util.MdCase):
"""Test new style index."""
extension = [
'pymdownx.emoji'
]
extension_configs = {
'pymdownx.emoji': {
'emoji_index': _new_style_index,
'options': {'append_alias': [(':grin:', ":smile:")]}
}
}
def test_new_index(self):
"""Test that we can pass options using the new style."""
self.check_markdown(
':grin:',
f'<p><img alt="\U0001f604" class="twemoji" src="{TWEMOJI_PNG_CDN}1f604.png" title=":grin:" /></p>'
)
| TestEmojiNewIndex |
| python | getsentry__sentry | src/sentry/relay/config/__init__.py | {"start": 46260, "end": 48890} |
class ____(_ConfigBase):
"""
Represents the restricted configuration available to an untrusted
"""
def __init__(self, project: Project, **kwargs: Any) -> None:
object.__setattr__(self, "project", project)
super().__init__(**kwargs)
def _load_filter_settings(flt: _FilterSpec, project: Project) -> Mapping[str, Any]:
"""
Returns the filter settings for the specified project
:param flt: the filter function
:param project: the project for which we want to retrieve the options
:return: a dictionary with the filter options.
If the project does not explicitly specify the filter options then the
default options for the filter will be returned
"""
filter_id = flt.id
filter_key = f"filters:{filter_id}"
setting = project.get_option(filter_key)
return _filter_option_to_config_setting(flt, setting)
def _filter_option_to_config_setting(flt: _FilterSpec, setting: str) -> Mapping[str, Any]:
"""
Encapsulates the logic for associating a filter database option with the filter setting from project_config
:param flt: the filter
:param setting: the option deserialized from the database
:return: the option as viewed from project_config
"""
if setting is None:
raise ValueError(
"Could not find filter state for filter {}."
" You need to register default filter state in projectoptions.defaults.".format(flt.id)
)
is_enabled = setting != "0"
ret_val: dict[str, bool | Sequence[str]] = {"isEnabled": is_enabled}
# special case for legacy browser.
# If the number of special cases increases we'll have to factor this functionality somewhere
if flt.id == FilterStatKeys.LEGACY_BROWSER:
if is_enabled:
if setting == "1":
ret_val["options"] = ["default"]
else:
# new style filter, per legacy browser type handling
# ret_val['options'] = setting.split(' ')
ret_val["options"] = list(setting)
elif flt.id == FilterStatKeys.HEALTH_CHECK:
if is_enabled:
ret_val = {"patterns": HEALTH_CHECK_GLOBS, "isEnabled": True}
else:
ret_val = {"patterns": [], "isEnabled": False}
return ret_val
#: Version of the transaction metrics extraction.
#: When you increment this version, outdated Relays will stop extracting
#: transaction metrics.
#: See https://github.com/getsentry/relay/blob/6181c6e80b9485ed394c40bc860586ae934704e2/relay-dynamic-config/src/metrics.rs#L85
TRANSACTION_METRICS_EXTRACTION_VERSION = 6
| ProjectConfig |
| python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {"start": 630074, "end": 630401} |
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("SponsorsActivity", graphql_name="node")
| SponsorsActivityEdge |
| python | doocs__leetcode | solution/0400-0499/0424.Longest Repeating Character Replacement/Solution.py | {"start": 0, "end": 324} |
class ____:
def characterReplacement(self, s: str, k: int) -> int:
cnt = Counter()
l = mx = 0
for r, c in enumerate(s):
cnt[c] += 1
mx = max(mx, cnt[c])
if r - l + 1 - mx > k:
cnt[s[l]] -= 1
l += 1
return len(s) - l
| Solution |
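A small runnable trace of the window invariant used in `characterReplacement` above: the window [l, r] keeps (window length - count of the most frequent character) <= k and never shrinks, so `len(s) - l` is the best window size. Input values here are illustrative.

from collections import Counter

s, k = "AABABBA", 1
cnt, l, mx = Counter(), 0, 0
for r, c in enumerate(s):
    cnt[c] += 1
    mx = max(mx, cnt[c])       # mx only grows; a stale mx can't inflate the answer
    if r - l + 1 - mx > k:     # too many characters would need replacing
        cnt[s[l]] -= 1         # slide the window right by one
        l += 1
assert len(s) - l == 4         # e.g. "AABA" -> "AAAA" with one replacement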
| python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis06.py | {"start": 315, "end": 1324} |
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "pie"})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$3",
"values": "=Sheet1!$B$1:$B$3",
}
)
chart.set_title({"name": "Title"})
chart.set_x_axis({"name": "XXX"})
chart.set_y_axis({"name": "YYY"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
| python | openai__gym | gym/error.py | {"start": 528, "end": 662} |
class ____(UnregisteredEnv):
"""Raised when the user requests an env from the registry where the name doesn't exist."""
| NameNotFound |
| python | kamyu104__LeetCode-Solutions | Python/the-knights-tour.py | {"start": 75, "end": 1439} |
class ____(object):
def tourOfKnight(self, m, n, r, c):
"""
:type m: int
:type n: int
:type r: int
:type c: int
:rtype: List[List[int]]
"""
DIRECTIONS = ((1, 2), (-1, 2), (1, -2), (-1, -2),
(2, 1), (-2, 1), (2, -1), (-2, -1))
def backtracking(r, c, i):
def degree(x):
cnt = 0
r, c = x
for dr, dc in DIRECTIONS:
nr, nc = r+dr, c+dc
if 0 <= nr < m and 0 <= nc < n and result[nr][nc] == -1:
cnt += 1
return cnt
if i == m*n:
return True
candidates = []
for dr, dc in DIRECTIONS:
nr, nc = r+dr, c+dc
if 0 <= nr < m and 0 <= nc < n and result[nr][nc] == -1:
candidates.append((nr, nc))
for nr, nc in sorted(candidates, key=degree): # warnsdorff's rule
result[nr][nc] = i
if backtracking(nr, nc, i+1):
return True
result[nr][nc] = -1
return False
result = [[-1]*n for _ in xrange(m)]
result[r][c] = 0
backtracking(r, c, 1)
return result
# Time: O(8^(m * n - 1))
# Space: O(1)
# backtracking
| Solution |
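A hedged usage sketch for the Warnsdorff-ordered backtracking above (Python 3 `range`; the source uses Python 2 `xrange`): a checker that confirms a returned board encodes a valid open knight's tour.

def is_valid_tour(board):
    m, n = len(board), len(board[0])
    pos = {board[r][c]: (r, c) for r in range(m) for c in range(n)}
    if len(pos) != m * n:                # every step index 0..m*n-1 appears exactly once
        return False
    return all(
        (abs(pos[i][0] - pos[i + 1][0]),
         abs(pos[i][1] - pos[i + 1][1])) in {(1, 2), (2, 1)}
        for i in range(m * n - 1)        # consecutive steps must be knight moves
    )

# hypothetical usage: is_valid_tour(Solution().tourOfKnight(5, 5, 0, 0))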
| python | django__django | django/contrib/auth/views.py | {"start": 12545, "end": 13005} |
class ____(PasswordContextMixin, TemplateView):
template_name = "registration/password_reset_complete.html"
title = _("Password reset complete")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["login_url"] = resolve_url(settings.LOGIN_URL)
return context
@method_decorator(
[sensitive_post_parameters(), csrf_protect, login_required], name="dispatch"
)
| PasswordResetCompleteView |
| python | sympy__sympy | sympy/polys/domains/quotientring.py | {"start": 416, "end": 2541} |
class ____:
"""
Class representing elements of (commutative) quotient rings.
Attributes:
- ring - containing ring
- data - element of ring.ring (i.e. base ring) representing self
"""
def __init__(self, ring, data):
self.ring = ring
self.data = data
def __str__(self):
from sympy.printing.str import sstr
data = self.ring.ring.to_sympy(self.data)
return sstr(data) + " + " + str(self.ring.base_ideal)
__repr__ = __str__
def __bool__(self):
return not self.ring.is_zero(self)
def __add__(self, om):
if not isinstance(om, self.__class__) or om.ring != self.ring:
try:
om = self.ring.convert(om)
except (NotImplementedError, CoercionFailed):
return NotImplemented
return self.ring(self.data + om.data)
__radd__ = __add__
def __neg__(self):
return self.ring(self.data*self.ring.ring.convert(-1))
def __sub__(self, om):
return self.__add__(-om)
def __rsub__(self, om):
return (-self).__add__(om)
def __mul__(self, o):
if not isinstance(o, self.__class__):
try:
o = self.ring.convert(o)
except (NotImplementedError, CoercionFailed):
return NotImplemented
return self.ring(self.data*o.data)
__rmul__ = __mul__
def __rtruediv__(self, o):
return self.ring.revert(self)*o
def __truediv__(self, o):
if not isinstance(o, self.__class__):
try:
o = self.ring.convert(o)
except (NotImplementedError, CoercionFailed):
return NotImplemented
return self.ring.revert(o)*self
def __pow__(self, oth):
if oth < 0:
return self.ring.revert(self) ** -oth
return self.ring(self.data ** oth)
def __eq__(self, om):
if not isinstance(om, self.__class__) or om.ring != self.ring:
return False
return self.ring.is_zero(self - om)
def __ne__(self, om):
return not self == om
| QuotientRingElement |
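The element arithmetic above delegates to representatives in the base ring and defines equality through the quotient (`self - om` reducing to zero). A toy analogy with Z/5Z, where the ideal is (5); this sketch is an analogy, not SymPy's API.

class ModIntSketch:
    """Toy quotient-ring element: integers modulo n, stored by representative."""
    def __init__(self, n, data):
        self.n, self.data = n, data % n
    def __add__(self, other):
        return ModIntSketch(self.n, self.data + other.data)
    def __mul__(self, other):
        return ModIntSketch(self.n, self.data * other.data)
    def __eq__(self, other):
        # equal iff the difference lies in the ideal, i.e. is divisible by n
        return (self.data - other.data) % self.n == 0

assert ModIntSketch(5, 3) + ModIntSketch(5, 4) == ModIntSketch(5, 2)   # 7 == 2 (mod 5)
assert ModIntSketch(5, 3) * ModIntSketch(5, 4) == ModIntSketch(5, 12)  # 12 == 2 (mod 5)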
| python | urllib3__urllib3 | test/with_dummyserver/test_socketlevel.py | {"start": 82783, "end": 84926} |
class ____(SocketDummyServerTestCase):
def test_enforce_content_length_get(self) -> None:
done_event = Event()
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 22\r\n"
b"Content-type: text/plain\r\n"
b"\r\n"
b"hello, world"
)
done_event.wait(LONG_TIMEOUT)
sock.close()
self._start_server(socket_handler)
with HTTPConnectionPool(self.host, self.port, maxsize=1) as conn:
# Test stream read when content length less than headers claim
get_response = conn.request(
"GET", url="/", preload_content=False, enforce_content_length=True
)
data = get_response.stream(100)
with pytest.raises(ProtocolError, match="12 bytes read, 10 more expected"):
next(data)
done_event.set()
def test_enforce_content_length_no_body(self) -> None:
done_event = Event()
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 22\r\n"
b"Content-type: text/plain\r\n"
b"\r\n"
)
done_event.wait(1)
sock.close()
self._start_server(socket_handler)
with HTTPConnectionPool(self.host, self.port, maxsize=1) as conn:
# Test stream on 0 length body
head_response = conn.request(
"HEAD", url="/", preload_content=False, enforce_content_length=True
)
data = [chunk for chunk in head_response.stream(1)]
assert len(data) == 0
done_event.set()
| TestBadContentLength |
| python | more-itertools__more-itertools | tests/test_more.py | {"start": 34657, "end": 35404} |
class ____(TestCase):
def test_even(self):
actual = list(mi.interleave_longest([1, 4, 7], [2, 5, 8], [3, 6, 9]))
expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertEqual(actual, expected)
def test_short(self):
actual = list(mi.interleave_longest([1, 4], [2, 5, 7], [3, 6, 8]))
expected = [1, 2, 3, 4, 5, 6, 7, 8]
self.assertEqual(actual, expected)
def test_mixed_types(self):
it_list = ['a', 'b', 'c', 'd']
it_str = '12345'
it_gen = (x for x in range(3))
actual = list(mi.interleave_longest(it_list, it_str, it_gen))
expected = ['a', '1', 0, 'b', '2', 1, 'c', '3', 2, 'd', '4', '5']
self.assertEqual(actual, expected)
| InterleaveLongestTests |
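A minimal sketch of the interleave-longest behavior the tests above exercise, built from `itertools` with a sentinel; this is an illustration, not necessarily the library's own implementation.

from itertools import chain, zip_longest

_MARKER = object()  # sentinel distinguishes "iterator exhausted" from real values

def interleave_longest_sketch(*iterables):
    for value in chain.from_iterable(zip_longest(*iterables, fillvalue=_MARKER)):
        if value is not _MARKER:
            yield value

assert list(interleave_longest_sketch([1, 4], [2, 5, 7], [3, 6, 8])) == [1, 2, 3, 4, 5, 6, 7, 8]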
| python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {"start": 9842, "end": 9899} |
class ____(BYTEA):
render_bind_cast = True
| AsyncpgByteA |
| python | pandas-dev__pandas | pandas/tests/frame/test_cumulative.py | {"start": 229, "end": 3278} |
class ____:
# ---------------------------------------------------------------------
# Cumulative Operations - cumsum, cummax, ...
def test_cumulative_ops_smoke(self):
# it works
df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
df.cummax()
df.cummin()
df.cumsum()
dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5))
# TODO(wesm): do something with this?
dm.cumsum()
def test_cumprod_smoke(self, datetime_frame):
datetime_frame.iloc[5:10, 0] = np.nan
datetime_frame.iloc[10:15, 1] = np.nan
datetime_frame.iloc[15:, 2] = np.nan
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cumulative_ops_match_series_apply(
self, datetime_frame, all_numeric_accumulations
):
datetime_frame.iloc[5:10, 0] = np.nan
datetime_frame.iloc[10:15, 1] = np.nan
datetime_frame.iloc[15:, 2] = np.nan
# axis = 0
result = getattr(datetime_frame, all_numeric_accumulations)()
expected = datetime_frame.apply(getattr(Series, all_numeric_accumulations))
tm.assert_frame_equal(result, expected)
# axis = 1
result = getattr(datetime_frame, all_numeric_accumulations)(axis=1)
expected = datetime_frame.apply(
getattr(Series, all_numeric_accumulations), axis=1
)
tm.assert_frame_equal(result, expected)
# fix issue TODO: GH ref?
assert np.shape(result) == np.shape(datetime_frame)
def test_cumsum_preserve_dtypes(self):
# GH#19296 dont incorrectly upcast to object
df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]})
result = df.cumsum()
expected = DataFrame(
{
"A": Series([1, 3, 6], dtype=np.int64),
"B": Series([1, 3, 6], dtype=np.float64),
"C": df["C"].cumsum(),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["cumsum", "cumprod", "cummin", "cummax"])
@pytest.mark.parametrize("axis", [0, 1])
def test_numeric_only_flag(self, method, axis):
df = DataFrame(
{
"int": [1, 2, 3],
"bool": [True, False, False],
"string": ["a", "b", "c"],
"float": [1.0, 3.5, 4.0],
"datetime": [
Timestamp(2018, 1, 1),
Timestamp(2019, 1, 1),
Timestamp(2020, 1, 1),
],
}
)
df_numeric_only = df.drop(["string", "datetime"], axis=1)
result = getattr(df, method)(axis=axis, numeric_only=True)
expected = getattr(df_numeric_only, method)(axis)
tm.assert_frame_equal(result, expected)
| TestDataFrameCumulativeOps |
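A compact illustration of the dtype-preservation behavior checked in `test_cumsum_preserve_dtypes` above (GH#19296): cumulative sums keep integer columns integral and float columns floating instead of upcasting to object.

import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]})
out = df.cumsum()
assert out["A"].dtype.kind == "i"     # still integer, not object
assert out["B"].dtype.kind == "f"     # still float
assert out["A"].tolist() == [1, 3, 6]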
| python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/base_aws.py | {"start": 44687, "end": 46589} |
class ____(AwsGenericHook[Union[boto3.client, boto3.resource]]): # noqa: UP007
"""
Base class for interact with AWS.
This class provide a thin wrapper around the boto3 Python library.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param client_type: Reference to :external:py:meth:`boto3.client service_name \
<boto3.session.Session.client>`, e.g. 'emr', 'batch', 's3', etc.
Mutually exclusive with ``resource_type``.
:param resource_type: Reference to :external:py:meth:`boto3.resource service_name \
<boto3.session.Session.resource>`, e.g. 's3', 'ec2', 'dynamodb', etc.
Mutually exclusive with ``client_type``.
:param config: Configuration for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
def resolve_session_factory() -> type[BaseSessionFactory]:
"""Resolve custom SessionFactory class."""
clazz = conf.getimport("aws", "session_factory", fallback=None)
if not clazz:
return BaseSessionFactory
if not issubclass(clazz, BaseSessionFactory):
raise TypeError(
f"Your custom AWS SessionFactory class `{clazz.__name__}` is not a subclass "
f"of `{BaseSessionFactory.__name__}`."
)
return clazz
SessionFactory = resolve_session_factory()
| AwsBaseHook |
| python | streamlit__streamlit | lib/tests/streamlit/elements/audio_test.py | {"start": 1260, "end": 11918} |
class ____(DeltaGeneratorTestCase):
def test_st_audio_from_bytes(self):
"""Test st.audio using fake audio bytes."""
# Fake audio data: expect the resultant mimetype to be audio default.
fake_audio_data = b"\x11\x22\x33\x44\x55\x66"
st.audio(fake_audio_data)
el = self.get_delta_from_queue().new_element
# locate resultant file in InMemoryFileManager and test its properties.
file_id = _calculate_file_id(fake_audio_data, "audio/wav")
media_file = self.media_file_storage.get_file(file_id)
assert media_file is not None
assert media_file.mimetype == "audio/wav"
assert self.media_file_storage.get_url(file_id) == el.audio.url
@parameterized.expand(
[
([],), # empty arr
([1, 2, 3, 4],), # 1d array
([[34, 15], [78, 98], [23, 78]],), # 2d numpy array
]
)
def test_st_audio_valid_numpy_array(self, arr):
"""Test st.audio using fake audio from empty, 1d, 2d numpy array."""
sample_rate = 44100
# Fake audio data: expect the resultant mimetype to be audio default.
fake_audio_np_array = np.array(arr)
st.audio(fake_audio_np_array, sample_rate=sample_rate)
computed_bytes = _maybe_convert_to_wav_bytes(
fake_audio_np_array, sample_rate=sample_rate
)
el = self.get_delta_from_queue().new_element
# locate resultant file in InMemoryFileManager and test its properties.
file_id = _calculate_file_id(computed_bytes, "audio/wav")
media_file = self.media_file_storage.get_file(file_id)
assert media_file is not None
assert media_file.mimetype == "audio/wav"
assert self.media_file_storage.get_url(file_id) == el.audio.url
assert media_file.content == computed_bytes
@parameterized.expand(
[
(
np.linspace(1, 10, num=300).reshape((10, 10, 3)), # 3d numpy array
3,
"Numpy array audio input must be a 1D or 2D array.",
),
(
np.linspace(1, 10, num=300).reshape((10, 2, 5, 3)), # 4d numpy array
4,
"Numpy array audio input must be a 1D or 2D array.",
),
(
np.empty((2, 0, 0, 0)), # 4d empty numpy array
4,
"Numpy array audio input must be a 1D or 2D array.",
),
]
)
def test_st_audio_invalid_numpy_array(self, np_arr, expected_shape, exception_text):
"""Test st.audio using invalid numpy array."""
sample_rate = 44100
assert len(np_arr.shape) == expected_shape
with pytest.raises(StreamlitAPIException) as e:
st.audio(np_arr, sample_rate=sample_rate)
assert str(e.value) == exception_text
def test_st_audio_missing_sample_rate_numpy_arr(self):
"""Test st.audio raises exception when sample_rate missing in case of valid
numpy array."""
valid_np_array = np.array([1, 2, 3, 4, 5])
with pytest.raises(StreamlitAPIException) as e:
st.audio(valid_np_array)
assert (
str(e.value)
== "`sample_rate` must be specified when `data` is a numpy array."
)
def test_st_audio_sample_rate_raises_warning(self):
"""Test st.audio raises streamlit warning when sample_rate parameter provided,
but data is not a numpy array."""
fake_audio_data = b"\x11\x22\x33\x44\x55\x66"
sample_rate = 44100
st.audio(fake_audio_data, sample_rate=sample_rate)
c = self.get_delta_from_queue(-2).new_element.alert
assert c.format == AlertProto.WARNING
assert (
c.body
== "Warning: `sample_rate` will be ignored since data is not a numpy array."
)
def test_maybe_convert_to_wave_numpy_arr_empty(self):
"""Test _maybe_convert_to_wave_bytes works correctly with empty numpy array."""
sample_rate = 44100
fake_audio_np_array = np.array([])
computed_bytes = _maybe_convert_to_wav_bytes(
fake_audio_np_array, sample_rate=sample_rate
)
assert computed_bytes == (
b"RIFF$\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00data"
b"\x00\x00\x00\x00"
)
def test_maybe_convert_to_wave_numpy_arr_mono(self):
"""Test _maybe_convert_to_wave_bytes works correctly with 1d numpy array."""
sample_rate = 7
fake_audio_np_array = np.array([1, 9])
computed_bytes = _maybe_convert_to_wav_bytes(
fake_audio_np_array, sample_rate=sample_rate
)
assert computed_bytes == (
b"RIFF(\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x01\x00\x07\x00\x00\x00\x0e\x00\x00\x00"
b"\x02\x00\x10\x00data\x04\x00\x00\x008\x0e\xff\x7f"
)
def test_maybe_convert_to_wave_numpy_arr_stereo(self):
"""Test _maybe_convert_to_wave_bytes works correctly with 2d numpy array."""
sample_rate = 44100
left_channel = np.array([1, 9])
right_channel = np.array([6, 1])
fake_audio_np_array = np.array([left_channel, right_channel])
computed_bytes = _maybe_convert_to_wav_bytes(
fake_audio_np_array, sample_rate=sample_rate
)
assert computed_bytes == (
b"RIFF,\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x02\x00D\xac\x00\x00\x10\xb1\x02\x00"
b"\x04\x00\x10\x00data\x08\x00\x00\x008\x0eTU\xff\x7f8\x0e"
)
def test_maybe_convert_to_wave_bytes_with_sample_rate(self):
"""Test _maybe_convert_to_wave_bytes works correctly with bytes."""
fake_audio_data_bytes = b"\x11\x22\x33\x44\x55\x66"
sample_rate = 44100
computed_bytes = _maybe_convert_to_wav_bytes(
fake_audio_data_bytes, sample_rate=sample_rate
)
assert computed_bytes == fake_audio_data_bytes
def test_maybe_convert_to_wave_bytes_without_sample_rate(self):
"""Test _maybe_convert_to_wave_bytes works correctly when sample_rate
is None."""
np_arr = np.array([0, 1, 2, 3])
computed_bytes = _maybe_convert_to_wav_bytes(np_arr, sample_rate=None)
assert computed_bytes is np_arr
@pytest.mark.require_integration
def test_st_audio_from_file(self):
"""Test st.audio using generated data in a file-like object."""
from scipy.io import wavfile
sample_rate = 44100
frequency = 440
length = 5
# Produces a 5 second Audio-File
t = np.linspace(0, length, sample_rate * length)
# Has frequency of 440Hz
y = np.sin(frequency * 2 * np.pi * t)
wavfile.write("test.wav", sample_rate, y)
with open("test.wav", "rb") as f:
st.audio(f)
el = self.get_delta_from_queue().new_element
assert ".wav" in el.audio.url
os.remove("test.wav")
def test_st_audio_from_url(self):
"""We can pass a URL directly to st.audio."""
# Test using a URL instead of data
some_url = "https://www.soundhelix.com/examples/mp3/SoundHelix-Song-3.mp3"
st.audio(some_url)
el = self.get_delta_from_queue().new_element
assert el.audio.url == some_url
def test_st_audio_raises_on_bad_filename(self):
"""A non-URL string is assumed to be a filename. A file we can't
open will result in an error.
"""
with pytest.raises(MediaFileStorageError):
st.audio("not/a/real/file")
def test_st_audio_from_none(self):
"""st.audio(None) is not an error."""
st.audio(None)
el = self.get_delta_from_queue().new_element
assert el.audio.url == ""
def test_st_audio_other_inputs(self):
"""Test that our other data types don't result in an error."""
st.audio(b"bytes_data")
st.audio(b"str_data")
st.audio(BytesIO(b"bytesio_data"))
st.audio(np.array([0, 1, 2, 3]), sample_rate=44100)
def test_st_audio_options(self):
"""Test st.audio with options."""
fake_audio_data = b"\x11\x22\x33\x44\x55\x66"
st.audio(
fake_audio_data,
format="audio/mp3",
start_time=10,
end_time=21,
loop=True,
autoplay=True,
)
el = self.get_delta_from_queue().new_element
assert el.audio.start_time == 10
assert el.audio.end_time == 21
assert el.audio.loop
assert el.audio.autoplay
assert el.audio.url.startswith(MEDIA_ENDPOINT)
        # the media URL embeds the file id, so membership is the meaningful check;
        # the original trailing-comma assert always passed (the comma made the URL a message)
        assert _calculate_file_id(fake_audio_data, "audio/mp3") in el.audio.url
def test_st_audio_just_data(self):
"""Test st.audio with just data specified."""
fake_audio_data = b"\x11\x22\x33\x44\x55\x66"
st.audio(fake_audio_data)
el = self.get_delta_from_queue().new_element
assert el.audio.start_time == 0
assert el.audio.end_time == 0
assert not el.audio.loop
assert not el.audio.autoplay
assert el.audio.url.startswith(MEDIA_ENDPOINT)
        # as above: check that the file id actually appears in the media URL
        assert _calculate_file_id(fake_audio_data, "audio/wav") in el.audio.url
@parameterized.expand(
[
("1s", None, (1, None)),
("1m", None, (60, None)),
("1m2s", None, (62, None)),
(0, "1m", (0, 60)),
("1h2m3s", None, (3723, None)),
("1m2s", "1m10s", (62, 70)),
("10 seconds", "15 seconds", (10, 15)),
("3 minutes 10 seconds", "3 minutes 20 seconds", (190, 200)),
]
)
def test_parse_start_time_end_time_success(
self, input_start_time, input_end_time, expected_value
):
"""Test that _parse_start_time_end_time works correctly."""
assert (
_parse_start_time_end_time(input_start_time, input_end_time)
== expected_value
)
@parameterized.expand(
[
("INVALID_VALUE", None, "Failed to convert 'start_time' to a timedelta"),
(5, "INVALID_VALUE", "Failed to convert 'end_time' to a timedelta"),
]
)
def test_parse_start_time_end_time_fail(self, start_time, end_time, exception_text):
"""Test that _parse_start_time_end_time works with correct exception text."""
with pytest.raises(StreamlitAPIException) as e:
_parse_start_time_end_time(start_time, end_time)
assert exception_text in str(e.value)
assert "INVALID_VALUE" in str(e.value)
| AudioTest |
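The raw RIFF/WAVE byte strings asserted in the `_maybe_convert_to_wave_*` tests above can be reproduced with the stdlib `wave` module; a sketch for the empty 16-bit mono 44100 Hz case covered by the first assertion.

import io
import wave

buf = io.BytesIO()
with wave.open(buf, "wb") as w:
    w.setnchannels(1)      # mono
    w.setsampwidth(2)      # 16-bit PCM
    w.setframerate(44100)  # 44100 Hz -> the b"D\xac\x00\x00" bytes above
    w.writeframes(b"")     # no samples: header-only file
assert buf.getvalue() == (
    b"RIFF$\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x01\x00D\xac\x00\x00"
    b"\x88X\x01\x00\x02\x00\x10\x00data\x00\x00\x00\x00"
)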
| python | numpy__numpy | numpy/lib/_utils_impl.py | {"start": 3487, "end": 23499} |
class ____:
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
Decorator call. Refer to ``decorate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
if old_name is None:
old_name = func.__name__
if new_name is None:
depdoc = f"`{old_name}` is deprecated!"
else:
depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"
if message is not None:
depdoc += "\n" + message
@functools.wraps(func)
def newfunc(*args, **kwds):
warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
return func(*args, **kwds)
newfunc.__name__ = old_name
doc = func.__doc__
if doc is None:
doc = depdoc
else:
lines = doc.expandtabs().split('\n')
indent = _get_indent(lines[1:])
if lines[0].lstrip():
# Indent the original first line to let inspect.cleandoc()
# dedent the docstring despite the deprecation notice.
doc = indent * ' ' + doc
else:
# Remove the same leading blank lines as cleandoc() would.
skip = len(lines[0]) + 1
for line in lines[1:]:
if len(line) > indent:
break
skip += len(line) + 1
doc = doc[skip:]
depdoc = textwrap.indent(depdoc, ' ' * indent)
doc = f'{depdoc}\n\n{doc}'
newfunc.__doc__ = doc
return newfunc
def _get_indent(lines):
"""
Determines the leading whitespace that could be removed from all the lines.
"""
indent = sys.maxsize
for line in lines:
content = len(line.lstrip())
if content:
indent = min(indent, len(line) - content)
if indent == sys.maxsize:
indent = 0
return indent
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
.. deprecated:: 2.0
Use `~warnings.warn` with :exc:`DeprecationWarning` instead.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in
which case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case the
deprecation message is that `old_name` is deprecated. If given, the
deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation
Warning:
>>> olduint = np.lib.utils.deprecate(np.uint)
DeprecationWarning: `uint64` is deprecated! # may vary
>>> olduint(6)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
# Deprecated in NumPy 2.0, 2023-07-11
warnings.warn(
"`deprecate` is deprecated, "
"use `warn` with `DeprecationWarning` instead. "
"(deprecated in NumPy 2.0)",
DeprecationWarning,
stacklevel=2
)
if args:
fn = args[0]
args = args[1:]
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
def deprecate_with_doc(msg):
"""
Deprecates a function and includes the deprecation in its docstring.
.. deprecated:: 2.0
Use `~warnings.warn` with :exc:`DeprecationWarning` instead.
This function is used as a decorator. It returns an object that can be
used to issue a DeprecationWarning, by passing the to-be decorated
function as argument, this adds warning to the to-be decorated function's
docstring and returns the new function object.
See Also
--------
deprecate : Decorate a function such that it issues a
:exc:`DeprecationWarning`
Parameters
----------
msg : str
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
obj : object
"""
# Deprecated in NumPy 2.0, 2023-07-11
warnings.warn(
"`deprecate` is deprecated, "
"use `warn` with `DeprecationWarning` instead. "
"(deprecated in NumPy 2.0)",
DeprecationWarning,
stacklevel=2
)
return _Deprecate(message=msg)
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__: module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x], types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def _info(obj, output=None):
"""Provide information about ndarray obj.
Parameters
----------
obj : ndarray
Must be ndarray, not checked.
output
Where printed output goes.
Notes
-----
Copied over from the numarray module prior to its removal.
Adapted somewhat as only numpy is an option now.
Called by info.
"""
extra = ""
tic = ""
bp = lambda x: x
cls = getattr(obj, '__class__', type(obj))
nm = getattr(cls, '__name__', cls)
strides = obj.strides
endian = obj.dtype.byteorder
if output is None:
output = sys.stdout
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
print("itemsize: ", obj.itemsize, file=output)
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
print(
f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}",
file=output
)
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print(f"{tic}{sys.byteorder}{tic}", file=output)
byteswap = False
elif endian == '>':
print(f"{tic}big{tic}", file=output)
byteswap = sys.byteorder != "big"
else:
print(f"{tic}little{tic}", file=output)
byteswap = sys.byteorder != "little"
print("byteswap: ", bp(byteswap), file=output)
print(f"type: {obj.dtype}", file=output)
@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
"""
Get help information for an array, function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is
an `ndarray` instance, information about the array is printed.
If `object` is a numpy object, its docstring is given. If it is
a string, available modules are searched for matching objects.
If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is
``None``, in which case ``sys.stdout`` will be used.
The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent
to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
When the argument is an array, information about the array is printed.
>>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
>>> np.info(a)
class: ndarray
shape: (2, 3)
strides: (24, 8)
itemsize: 8
aligned: True
contiguous: True
fortran: False
data pointer: 0x562b6e0d2860 # may vary
byteorder: little
byteswap: False
type: complex64
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import inspect
import pydoc
if (hasattr(object, '_ppimport_importer') or
hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if output is None:
output = sys.stdout
if object is None:
info(info)
elif isinstance(object, ndarray):
_info(object, output=output)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print(f"\n *** Repeat reference found in {namestr} *** ",
file=output
)
else:
objlist.append(id(obj))
print(f" *** Found in {namestr} ***", file=output)
info(obj)
print("-" * maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
print(f"Help for {object} not found.", file=output)
else:
print("\n "
"*** Total of %d references found. ***" % numfound,
file=output
)
elif inspect.isfunction(object) or inspect.ismethod(object):
name = object.__name__
try:
arguments = str(inspect.signature(object))
except Exception:
arguments = "()"
if len(name + arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
try:
arguments = str(inspect.signature(object))
except Exception:
arguments = "()"
if len(name + arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object, '__init__'):
print(inspect.getdoc(object.__init__), file=output)
else:
print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
public_methods = [meth for meth in methods if meth[0] != '_']
if public_methods:
print("\n\nMethods:\n", file=output)
for meth in public_methods:
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(
inspect.getdoc(thisobj) or "None"
)
print(f" {meth} -- {methstr}", file=output)
elif hasattr(object, '__doc__'):
print(inspect.getdoc(object), file=output)
def safe_eval(source):
"""
Protected string evaluation.
.. deprecated:: 2.0
Use `ast.literal_eval` instead.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
.. warning::
This function is identical to :py:meth:`ast.literal_eval` and
has the same security implications. It may not always be safe
to evaluate large input strings.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains
non-literal code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
ValueError: malformed node or string: <_ast.Call object at 0x...>
"""
# Deprecated in NumPy 2.0, 2023-07-11
warnings.warn(
"`safe_eval` is deprecated. Use `ast.literal_eval` instead. "
"Be aware of security implications, such as memory exhaustion "
"based attacks (deprecated in NumPy 2.0)",
DeprecationWarning,
stacklevel=2
)
# Local import to speed up numpy's import time.
import ast
return ast.literal_eval(source)
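# Migration sketch for the deprecation above (illustrative, not part of this
# module): ``ast.literal_eval`` is the drop-in replacement.
def _demo_literal_eval():
    import ast
    assert ast.literal_eval("[1, 2, 3]") == [1, 2, 3]
    assert ast.literal_eval("{'foo': ('bar', 10.0)}") == {"foo": ("bar", 10.0)}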
def _median_nancheck(data, result, axis):
"""
Utility function to check median result from data for NaN values at the end
and return NaN in that case. Input result can also be a MaskedArray.
Parameters
----------
data : array
Sorted input data to median function
result : Array or MaskedArray
Result of median function.
axis : int
Axis along which the median was computed.
Returns
-------
result : scalar or ndarray
Median or NaN in axes which contained NaN in the input. If the input
was an array, NaN will be inserted in-place. If a scalar, either the
input itself or a scalar NaN.
"""
if data.size == 0:
return result
potential_nans = data.take(-1, axis=axis)
n = np.isnan(potential_nans)
# masked NaN values are ok, although for masked the copyto may fail for
# unmasked ones (this was always broken) when the result is a scalar.
if np.ma.isMaskedArray(n):
n = n.filled(False)
if not n.any():
return result
# Without given output, it is possible that the current result is a
# numpy scalar, which is not writeable. If so, just return nan.
if isinstance(result, np.generic):
return potential_nans
# Otherwise copy NaNs (if there are any)
np.copyto(result, potential_nans, where=n)
return result
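# Behavioral sketch of the contract above (illustrative, not part of this
# module): sorting moves NaN to the end of a lane, so a single NaN poisons
# that lane's median.
def _demo_median_nan():
    a = np.array([[1.0, 2.0, np.nan], [4.0, 5.0, 6.0]])
    med = np.median(a, axis=1)
    assert np.isnan(med[0]) and med[1] == 5.0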
def _opt_info():
"""
Returns a string containing the CPU features supported
by the current build.
The format of the string can be explained as follows:
- Dispatched features supported by the running machine end with `*`.
- Dispatched features not supported by the running machine
end with `?`.
- Remaining features represent the baseline.
Returns:
str: A formatted string indicating the supported CPU features.
"""
from numpy._core._multiarray_umath import (
__cpu_baseline__,
__cpu_dispatch__,
__cpu_features__,
)
if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
return ''
enabled_features = ' '.join(__cpu_baseline__)
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
enabled_features += f" {feature}*"
else:
enabled_features += f" {feature}?"
return enabled_features
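# Parsing sketch for the string format documented above (illustrative; the
# helper name is hypothetical and feature names vary by build):
def _split_opt_info(enabled_features):
    baseline, dispatched_on, dispatched_off = [], [], []
    for feat in enabled_features.split():
        if feat.endswith("*"):
            dispatched_on.append(feat[:-1])    # dispatched, supported here
        elif feat.endswith("?"):
            dispatched_off.append(feat[:-1])   # dispatched, unsupported here
        else:
            baseline.append(feat)              # compiled-in baseline
    return baseline, dispatched_on, dispatched_off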
def drop_metadata(dtype, /):
"""
Returns the dtype unchanged if it contained no metadata or a copy of the
dtype if it (or any of its structure dtypes) contained metadata.
This utility is used by `np.save` and `np.savez` to drop metadata before
saving.
.. note::
Due to its limitation this function may move to a more appropriate
home or change in the future and is considered semi-public API only.
.. warning::
        This function does not preserve unusual things like record dtypes,
        and user dtypes may simply return the wrong thing. If you need to be
sure about the latter, check the result with:
``np.can_cast(new_dtype, dtype, casting="no")``.
"""
if dtype.fields is not None:
found_metadata = dtype.metadata is not None
names = []
formats = []
offsets = []
titles = []
for name, field in dtype.fields.items():
field_dt = drop_metadata(field[0])
if field_dt is not field[0]:
found_metadata = True
names.append(name)
formats.append(field_dt)
offsets.append(field[1])
titles.append(None if len(field) < 3 else field[2])
if not found_metadata:
return dtype
structure = {
'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles,
'itemsize': dtype.itemsize}
# NOTE: Could pass (dtype.type, structure) to preserve record dtypes...
return np.dtype(structure, align=dtype.isalignedstruct)
elif dtype.subdtype is not None:
# subarray dtype
subdtype, shape = dtype.subdtype
new_subdtype = drop_metadata(subdtype)
if dtype.metadata is None and new_subdtype is subdtype:
return dtype
return np.dtype((new_subdtype, shape))
else:
# Normal unstructured dtype
if dtype.metadata is None:
return dtype
# Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
return np.dtype(dtype.str)
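# Minimal usage sketch (illustrative): metadata attaches via the ``metadata``
# argument of ``np.dtype`` and is stripped by the function above.
def _demo_drop_metadata():
    dt = np.dtype(np.float64, metadata={"unit": "m"})
    clean = drop_metadata(dt)
    assert dt.metadata == {"unit": "m"} and clean.metadata is None
    assert clean == dt  # dtype equality ignores metadata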
|
_Deprecate
|
python
|
huggingface__transformers
|
src/transformers/models/owlvit/modeling_owlvit.py
|
{
"start": 36943,
"end": 39184
}
|
class ____(nn.Module):
def __init__(self, config: OwlViTVisionConfig):
super().__init__()
self.config = config
self.embeddings = OwlViTVisionEmbeddings(config)
self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.encoder = OwlViTEncoder(config)
self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: Optional[bool] = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Cast the input to the expected `dtype`
expected_input_dtype = self.embeddings.patch_embedding.weight.dtype
pixel_values = pixel_values.to(expected_input_dtype)
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layernorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
|
OwlViTVisionTransformer
|
python
|
Textualize__rich
|
tests/test_repr.py
|
{
"start": 465,
"end": 638
}
|
class ____:
def __init__(self, foo: str, bar: Optional[int] = None, egg: int = 1):
self.foo = foo
self.bar = bar
self.egg = egg
@rich.repr.auto
|
Egg
|
python
|
gevent__gevent
|
src/gevent/_config.py
|
{
"start": 13112,
"end": 13807
}
|
class ____(BoolSettingMixin, Setting):
name = 'track_greenlet_tree'
environment_key = 'GEVENT_TRACK_GREENLET_TREE'
default = True
desc = """\
Should `Greenlet` objects track their spawning tree?
Setting this to a false value will make spawning `Greenlet`
objects and using `spawn_raw` faster, but the
``spawning_greenlet``, ``spawn_tree_locals`` and ``spawning_stack``
will not be captured. Setting this to a false value can also
reduce memory usage because capturing the stack captures
some information about Python frames.
.. versionadded:: 1.3b1
"""
## Monitoring settings
# All env keys should begin with GEVENT_MONITOR
|
TrackGreenletTree
|
python
|
ray-project__ray
|
python/ray/data/tests/unit/test_datatype.py
|
{
"start": 29988,
"end": 31461
}
|
class ____:
"""Test that pattern-matching types cannot be converted to concrete Arrow types."""
@pytest.mark.parametrize(
"pattern_type_factory",
[
lambda: DataType.list(),
lambda: DataType.large_list(),
lambda: DataType.struct(),
lambda: DataType.map(),
lambda: DataType.tensor(),
lambda: DataType.variable_shaped_tensor(),
lambda: DataType.temporal(),
],
)
def test_pattern_matching_to_arrow_dtype_raises(self, pattern_type_factory):
"""Test that calling to_arrow_dtype on pattern-matching types raises an error.
Pattern-matching types represent abstract type categories (e.g., "any list")
and cannot be converted to concrete Arrow types.
"""
dt = pattern_type_factory()
assert dt.is_pattern_matching()
with pytest.raises(ValueError, match="Cannot convert pattern-matching type"):
dt.to_arrow_dtype()
def test_pattern_matching_to_arrow_dtype_with_values_still_raises(self):
"""Test that even with values, pattern-matching types cannot be converted."""
dt = DataType.list()
assert dt.is_pattern_matching()
# Even with values provided, pattern-matching types shouldn't convert
with pytest.raises(ValueError, match="Cannot convert pattern-matching type"):
dt.to_arrow_dtype(values=[1, 2, 3])
|
TestPatternMatchingToArrowDtype
|
python
|
django__django
|
django/db/utils.py
|
{
"start": 6515,
"end": 9350
}
|
class ____:
def __init__(self, routers=None):
"""
If routers is not specified, default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, str):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next
# one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get("instance")
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func("db_for_read")
db_for_write = _router_func("db_for_write")
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""Return app models allowed to be migrated on provided db."""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
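# Sketch of a router the class above would consult (illustrative): any subset
# of the four hook methods may be implemented, and returning None defers to
# the next router in settings.DATABASE_ROUTERS.
class ReadReplicaRouter:
    def db_for_read(self, model, **hints):
        return "replica"
    def db_for_write(self, model, **hints):
        return "default"
    def allow_relation(self, obj1, obj2, **hints):
        return None  # defer to the next router
    def allow_migrate(self, db, app_label, **hints):
        return db == "default"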
|
ConnectionRouter
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_run.py
|
{
"start": 3031,
"end": 3194
}
|
class ____(BaseModel):
"""DAG Run Collection serializer for responses."""
dag_runs: Iterable[DAGRunResponse]
total_entries: int
|
DAGRunCollectionResponse
|
python
|
python-visualization__folium
|
folium/plugins/timestamped_geo_json.py
|
{
"start": 216,
"end": 9135
}
|
class ____(JSCSSMixin, MacroElement):
"""
Creates a TimestampedGeoJson plugin from timestamped GeoJSONs to append
into a map with Map.add_child.
A geo-json is timestamped if:
* it contains only features of types LineString, MultiPoint, MultiLineString,
Polygon and MultiPolygon.
* each feature has a 'times' property with the same length as the
coordinates array.
* each element of each 'times' property is a timestamp in ms since epoch,
or in ISO string.
    Point features are also allowed, provided their 'times' property is an
    array of length 1.
Parameters
----------
data: file, dict or str.
The timestamped geo-json data you want to plot.
* If file, then data will be read in the file and fully embedded in
Leaflet's javascript.
* If dict, then data will be converted to json and embedded in the
javascript.
* If str, then data will be passed to the javascript as-is.
transition_time: int, default 200.
The duration in ms of a transition from between timestamps.
loop: bool, default True
Whether the animation shall loop.
auto_play: bool, default True
Whether the animation shall start automatically at startup.
add_last_point: bool, default True
Whether a point is added at the last valid coordinate of a LineString.
period: str, default "P1D"
Used to construct the array of available times starting
from the first available time. Format: ISO8601 Duration
ex: 'P1M' 1/month, 'P1D' 1/day, 'PT1H' 1/hour, and 'PT1M' 1/minute
duration: str, default None
Period of time which the features will be shown on the map after their
time has passed. If None, all previous times will be shown.
Format: ISO8601 Duration
ex: 'P1M' 1/month, 'P1D' 1/day, 'PT1H' 1/hour, and 'PT1M' 1/minute
Examples
--------
>>> TimestampedGeoJson(
... {
... "type": "FeatureCollection",
... "features": [
... {
... "type": "Feature",
... "geometry": {
... "type": "LineString",
... "coordinates": [[-70, -25], [-70, 35], [70, 35]],
... },
... "properties": {
... "times": [1435708800000, 1435795200000, 1435881600000],
... "tooltip": "my tooltip text",
... },
... }
... ],
... }
... )
See https://github.com/socib/Leaflet.TimeDimension for more information.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
_getDisplayDateFormat: function(date){
var newdate = new moment(date);
console.log(newdate)
return newdate.format("{{this.date_options}}");
}
});
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{
period: {{ this.period|tojson }},
}
);
var timeDimensionControl = new L.Control.TimeDimensionCustom(
{{ this.options|tojavascript }}
);
{{this._parent.get_name()}}.addControl(this.timeDimensionControl);
var geoJsonLayer = L.geoJson({{this.data}}, {
pointToLayer: function (feature, latLng) {
if (feature.properties.icon == 'marker') {
if(feature.properties.iconstyle){
return new L.Marker(latLng, {
icon: L.icon(feature.properties.iconstyle)});
}
//else
return new L.Marker(latLng);
}
if (feature.properties.icon == 'circle') {
if (feature.properties.iconstyle) {
return new L.circleMarker(latLng, feature.properties.iconstyle)
};
//else
return new L.circleMarker(latLng);
}
//else
return new L.Marker(latLng);
},
style: function (feature) {
return feature.properties.style;
},
onEachFeature: function(feature, layer) {
if (feature.properties.popup) {
layer.bindPopup(feature.properties.popup);
}
if (feature.properties.tooltip) {
layer.bindTooltip(feature.properties.tooltip);
}
}
})
var {{this.get_name()}} = L.timeDimension.layer.geoJson(
geoJsonLayer,
{
updateTimeDimension: true,
addlastPoint: {{ this.add_last_point|tojson }},
duration: {{ this.duration }},
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
"""
) # noqa
default_js = [
(
"jquery3.7.1",
"https://cdnjs.cloudflare.com/ajax/libs/jquery/3.7.1/jquery.min.js",
),
(
"jqueryui1.10.2",
"https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js",
),
(
"iso8601",
"https://cdn.jsdelivr.net/npm/iso8601-js-period@0.2.1/iso8601.min.js",
),
(
"leaflet.timedimension",
"https://cdn.jsdelivr.net/npm/leaflet-timedimension@1.1.1/dist/leaflet.timedimension.min.js",
),
# noqa
(
"moment",
"https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.18.1/moment.min.js",
),
]
default_css = [
(
"highlight.js_css",
"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.4/styles/default.min.css",
),
(
"leaflet.timedimension_css",
"https://cdn.jsdelivr.net/npm/leaflet-timedimension@1.1.1/dist/leaflet.timedimension.control.css",
),
]
def __init__(
self,
data,
transition_time=200,
loop=True,
auto_play=True,
add_last_point=True,
period="P1D",
min_speed=0.1,
max_speed=10,
loop_button=False,
date_options="YYYY-MM-DD HH:mm:ss",
time_slider_drag_update=False,
duration=None,
speed_slider=True,
):
super().__init__()
self._name = "TimestampedGeoJson"
if "read" in dir(data):
self.embed = True
self.data = data.read()
elif type(data) is dict:
self.embed = True
self.data = json.dumps(data)
else:
self.embed = False
self.data = data
self.add_last_point = bool(add_last_point)
self.period = period
self.date_options = date_options
self.duration = "undefined" if duration is None else '"' + duration + '"'
self.options = remove_empty(
position="bottomleft",
min_speed=min_speed,
max_speed=max_speed,
auto_play=auto_play,
loop_button=loop_button,
time_slider_drag_update=time_slider_drag_update,
speed_slider=speed_slider,
player_options={
"transitionTime": int(transition_time),
"loop": loop,
"startOver": True,
},
)
def render(self, **kwargs):
assert isinstance(
self._parent, Map
), "TimestampedGeoJson can only be added to a Map object."
super().render(**kwargs)
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
if not self.embed:
raise ValueError("Cannot compute bounds of non-embedded GeoJSON.")
data = json.loads(self.data)
if "features" not in data.keys():
# Catch case when GeoJSON is just a single Feature or a geometry.
if not (isinstance(data, dict) and "geometry" in data.keys()):
# Catch case when GeoJSON is just a geometry.
data = {"type": "Feature", "geometry": data}
data = {"type": "FeatureCollection", "features": [data]}
return get_bounds(data, lonlat=True)
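# Usage sketch (illustrative; class name per the record's target field): the
# plugin attaches to a Map like any other folium element, and the data dict
# mirrors the docstring example above.
def _demo_timestamped_geojson():
    import folium
    data = {
        "type": "FeatureCollection",
        "features": [{
            "type": "Feature",
            "geometry": {"type": "LineString",
                         "coordinates": [[-70, -25], [-70, 35]]},
            "properties": {"times": [1435708800000, 1435795200000]},
        }],
    }
    m = folium.Map([0, 0], zoom_start=2)
    TimestampedGeoJson(data, period="P1D").add_to(m)
    return m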
|
TimestampedGeoJson
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_scalarmath.py
|
{
"start": 28973,
"end": 30536
}
|
class ____(TestCase):
@parametrize("type_code", np.typecodes["AllInteger"])
def test_integer_hashes(self, type_code):
scalar = np.dtype(type_code).type
for i in range(128):
assert hash(i) == hash(scalar(i))
@parametrize("type_code", np.typecodes["AllFloat"])
def test_float_and_complex_hashes(self, type_code):
scalar = np.dtype(type_code).type
for val in [np.pi, np.inf, 3, 6.0]:
numpy_val = scalar(val)
# Cast back to Python, in case the NumPy scalar has less precision
if numpy_val.dtype.kind == "c":
val = complex(numpy_val)
else:
val = float(numpy_val)
assert val == numpy_val
assert hash(val) == hash(numpy_val)
if hash(float(np.nan)) != hash(float(np.nan)):
# If Python distinguishes different NaNs we do so too (gh-18833)
assert hash(scalar(np.nan)) != hash(scalar(np.nan))
@parametrize("type_code", np.typecodes["Complex"])
def test_complex_hashes(self, type_code):
# Test some complex valued hashes specifically:
scalar = np.dtype(type_code).type
for val in [np.pi + 1j, np.inf - 3j, 3j, 6.0 + 1j]:
numpy_val = scalar(val)
assert hash(complex(numpy_val)) == hash(numpy_val)
@contextlib.contextmanager
def recursionlimit(n):
o = sys.getrecursionlimit()
try:
sys.setrecursionlimit(n)
yield
finally:
sys.setrecursionlimit(o)
@instantiate_parametrized_tests
|
TestHash
|
python
|
ray-project__ray
|
python/ray/tests/test_memory_scheduling.py
|
{
"start": 435,
"end": 2153
}
|
class ____:
def __init__(self):
pass
def ping(self):
return "ok"
def test_memory_request():
try:
ray.init(num_cpus=1, _memory=200 * MB)
# fits first 2
a = Actor.remote()
b = Actor.remote()
ok, _ = ray.wait(
[a.ping.remote(), b.ping.remote()], timeout=60.0, num_returns=2
)
assert len(ok) == 2
# does not fit
c = Actor.remote()
ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
assert len(ok) == 0
finally:
ray.shutdown()
def test_object_store_memory_reporting():
try:
ray.init(num_cpus=1, object_store_memory=500 * MB)
wait_for_condition(lambda: object_store_memory(500 * MB))
x1 = ray.put(np.zeros(150 * 1024 * 1024, dtype=np.uint8))
wait_for_condition(lambda: object_store_memory(350 * MB))
x2 = ray.put(np.zeros(75 * 1024 * 1024, dtype=np.uint8))
wait_for_condition(lambda: object_store_memory(275 * MB))
del x1
del x2
wait_for_condition(lambda: object_store_memory(500 * MB))
finally:
ray.shutdown()
def test_object_store_memory_reporting_task():
@ray.remote
def f(x):
time.sleep(60)
try:
ray.init(num_cpus=1, object_store_memory=500 * MB)
wait_for_condition(lambda: object_store_memory(500 * MB))
x1 = f.remote(np.zeros(150 * 1024 * 1024, dtype=np.uint8))
wait_for_condition(lambda: object_store_memory(350 * MB))
ray.cancel(x1, force=True)
wait_for_condition(lambda: object_store_memory(500 * MB))
finally:
ray.shutdown()
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
Actor
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/utils/parallel.py
|
{
"start": 4203,
"end": 5308
}
|
class ____(AbstractProgressInfoMatcher):
DOCKER_BUILDX_PROGRESS_MATCHER = re.compile(r"\s*#(\d*) ")
def __init__(self):
self.last_docker_build_lines: dict[str, str] = {}
def get_best_matching_lines(self, output: Output) -> list[str] | None:
last_lines, last_lines_no_colors = get_last_lines_of_file(output.file_name, num_lines=5)
best_progress: int = 0
best_line: str | None = None
for index, line in enumerate(last_lines_no_colors):
match = DockerBuildxProgressMatcher.DOCKER_BUILDX_PROGRESS_MATCHER.match(line)
if match:
docker_progress = int(match.group(1))
if docker_progress > best_progress:
best_progress = docker_progress
best_line = last_lines[index]
if best_line is None:
best_line = self.last_docker_build_lines.get(output.file_name)
else:
self.last_docker_build_lines[output.file_name] = best_line
if best_line is None:
return None
return [best_line]
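# Regex sketch (illustrative): buildx numbers each step as "#<n> ...", and the
# matcher above keeps the line carrying the highest step number seen so far.
def _demo_buildx_regex():
    line = "#23 [builder 5/7] RUN pip install ."
    m = DockerBuildxProgressMatcher.DOCKER_BUILDX_PROGRESS_MATCHER.match(line)
    assert m is not None and int(m.group(1)) == 23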
|
DockerBuildxProgressMatcher
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py
|
{
"start": 2754,
"end": 2923
}
|
class ____[**Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]:
pass
|
TestTypeParams
|
python
|
run-llama__llama_index
|
llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/base.py
|
{
"start": 1117,
"end": 11122
}
|
class ____(Conversation, BaseVoiceAgent):
"""
Conversational AI session.
BETA: This API is subject to change without regard to backwards compatibility.
Attributes:
client (BaseElevenLabs): The ElevenLabs client to use for the conversation.
agent_id (str): The ID of the agent to converse with.
requires_auth (bool): Whether the agent requires authentication.
audio_interface (AudioInterface): The audio interface to use for input and output.
config (Optional[ConversationInitiationData]): The configuration for the conversation
client_tools (Optional[ClientTools]): The client tools to use for the conversation.
"""
interface: Optional[BaseVoiceAgentInterface]
client: BaseElevenLabs
requires_auth: bool
agent_id: str
tools: Optional[List[BaseTool]]
_last_message_id: int
_callback_agent_response: Callable
_callback_agent_response_correction: Callable
_callback_user_transcript: Callable
_callback_latency_measurement: Callable
_all_chat: Dict[int, List[ChatMessage]]
_messages: List[ChatMessage]
_events: List[BaseVoiceAgentEvent]
_thread: Optional[threading.Thread]
_should_stop: threading.Event
_conversation_id: Optional[str]
_last_interrupt_id: int
_ws: Optional[Connection]
def __init__(
self,
client: BaseElevenLabs,
agent_id: str,
requires_auth: bool,
interface: Optional[BaseVoiceAgentInterface] = None,
config: Optional[ConversationInitiationData] = None,
tools: Optional[List[BaseTool]] = None,
) -> None:
self.client = client
self.agent_id = agent_id
self.requires_auth = requires_auth
self.interface = interface
if not interface:
self.interface = ElevenLabsVoiceAgentInterface()
self.config = config or ConversationInitiationData()
client_tools = ClientTools()
if tools:
for tool in tools:
if tool.metadata.fn_schema is not None:
fn = make_function_from_tool_model(
model_cls=tool.metadata.fn_schema, tool=tool
)
client_tools.register(
tool_name=tool.metadata.get_name(), handler=fn
)
else:
warnings.warn(
f"Tool {tool.metadata.get_name()} could not added, since its function schema seems to be unavailable"
)
self.client_tools = client_tools or ClientTools()
self.client_tools.start()
self._callback_agent_response = callback_agent_message
self._callback_agent_response_correction = callback_agent_message_correction
self._callback_user_transcript = callback_user_message
self._callback_latency_measurement = callback_latency_measurement
self._latencies: List[int] = []
self._all_chat: Dict[int, List[ChatMessage]] = {}
self._messages: List[ChatMessage] = []
self._events: List[BaseVoiceAgentEvent] = []
self._current_message_id: int = 0
self._thread = None
self._ws: Optional[Connection] = None
self._should_stop = threading.Event()
self._conversation_id = None
self._last_interrupt_id = 0
def start(self, *args: Any, **kwargs: Any) -> None:
self.start_session()
def stop(self) -> None:
self.end_session()
self.wait_for_session_end()
def interrupt(self) -> None:
self.interface.interrupt()
def _run(self, ws_url: str):
with connect(ws_url, max_size=16 * 1024 * 1024) as ws:
self._ws = ws
ws.send(
json.dumps(
{
"type": "conversation_initiation_client_data",
"custom_llm_extra_body": self.config.extra_body,
"conversation_config_override": self.config.conversation_config_override,
"dynamic_variables": self.config.dynamic_variables,
}
)
)
self._ws = ws
def input_callback(audio):
try:
ws.send(
json.dumps(
{
"user_audio_chunk": base64.b64encode(audio).decode(),
}
)
)
except ConnectionClosedOK:
self.end_session()
except Exception as e:
print(f"Error sending user audio chunk: {e}")
self.end_session()
self.audio_interface.start(input_callback)
while not self._should_stop.is_set():
try:
message = json.loads(ws.recv(timeout=0.5))
if self._should_stop.is_set():
return
self.handle_message(message, ws)
except ConnectionClosedOK as e:
self.end_session()
except TimeoutError:
pass
except Exception as e:
print(f"Error receiving message: {e}")
self.end_session()
self._ws = None
def handle_message(self, message: Dict, ws: Any) -> None:
if message["type"] == "conversation_initiation_metadata":
event = message["conversation_initiation_metadata_event"]
self._events.append(
ConversationInitEvent(
type_t="conversation_initiation_metadata", **event
)
)
assert self._conversation_id is None
self._conversation_id = event["conversation_id"]
elif message["type"] == "audio":
event = message["audio_event"]
self._events.append(AudioEvent(type_t="audio", **event))
if int(event["event_id"]) <= self._last_interrupt_id:
return
audio = base64.b64decode(event["audio_base_64"])
self._callback_agent_response(
messages=self._all_chat,
message_id=self._current_message_id,
audio=event["audio_base_64"],
)
self.audio_interface.output(audio)
elif message["type"] == "agent_response":
event = message["agent_response_event"]
self._events.append(AgentResponseEvent(type_t="agent_response", **event))
self._callback_agent_response(
messages=self._all_chat,
message_id=self._current_message_id,
text=event["agent_response"].strip(),
)
elif message["type"] == "agent_response_correction":
event = message["agent_response_correction_event"]
self._events.append(
AgentResponseCorrectionEvent(
type_t="agent_response_correction", **event
)
)
self._callback_agent_response_correction(
messages=self._all_chat,
message_id=self._current_message_id,
text=event["corrected_agent_response"].strip(),
)
elif message["type"] == "user_transcript":
self._current_message_id += 1
event = message["user_transcription_event"]
self._events.append(
UserTranscriptionEvent(type_t="user_transcript", **event)
)
self._callback_user_transcript(
messages=self._all_chat,
message_id=self._current_message_id,
text=event["user_transcript"].strip(),
)
elif message["type"] == "interruption":
event = message["interruption_event"]
self._events.append(InterruptionEvent(type_t="interruption", **event))
self._last_interrupt_id = int(event["event_id"])
self.audio_interface.interrupt()
elif message["type"] == "ping":
event = message["ping_event"]
self._events.append(PingEvent(type_t="ping", **event))
ws.send(
json.dumps(
{
"type": "pong",
"event_id": event["event_id"],
}
)
)
if event["ping_ms"] is None:
event["ping_ms"] = 0
self._callback_latency_measurement(self._latencies, int(event["ping_ms"]))
elif message["type"] == "client_tool_call":
self._events.append(ClientToolCallEvent(type_t="client_tool_call", **event))
tool_call = message.get("client_tool_call", {})
tool_name = tool_call.get("tool_name")
parameters = {
"tool_call_id": tool_call["tool_call_id"],
**tool_call.get("parameters", {}),
}
def send_response(response):
if not self._should_stop.is_set():
ws.send(json.dumps(response))
self.client_tools.execute_tool(tool_name, parameters, send_response)
message = f"Calling tool: {tool_name} with parameters: {parameters}"
self._callback_agent_response(
messages=self._all_chat,
message_id=self._current_message_id,
text=message,
)
else:
pass # Ignore all other message types.
self._messages = get_messages_from_chat(self._all_chat)
@property
def average_latency(self) -> Union[int, float]:
"""
Get the average latency of your conversational agent.
Returns:
The average latency if latencies are recorded, otherwise 0.
"""
if not self._latencies:
return 0
return mean(self._latencies)
|
ElevenLabsVoiceAgent
|
python
|
google__pytype
|
pytype/rewrite/abstract/containers_test.py
|
{
"start": 242,
"end": 424
}
|
class ____(test_utils.ContextfulTestBase):
"""Base class for constant tests."""
def const_var(self, const, name=None):
return self.ctx.consts[const].to_variable(name)
|
BaseTest
|
python
|
pypa__warehouse
|
warehouse/predicates.py
|
{
"start": 322,
"end": 786
}
|
class ____:
def __init__(self, val, config):
self.val = val
def text(self):
return f"domain = {self.val!r}"
phash = text
def __call__(self, info, request):
        # Support running under the same instance for local development and for
        # test.pypi.io which will continue to host its own uploader.
if self.val is None:
return True
return is_same_domain(request.domain, self.val)
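# Registration sketch (illustrative; uses Pyramid's standard hook for custom
# route predicates, making ``domain=`` a valid keyword for ``add_route``).
# ``DomainPredicate`` is the class defined above, per the record's target;
# the route name and domain below are hypothetical.
def _demo_register_domain_predicate(config):
    # config: pyramid.config.Configurator
    config.add_route_predicate("domain", DomainPredicate)
    config.add_route("legacy.api", "/pypi", domain="upload.example.com")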
|
DomainPredicate
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/cluster.py
|
{
"start": 221,
"end": 566
}
|
class ____:
"""The properties of a single shard of a collection."""
collection: str
name: str
node: str
object_count: int
vector_indexing_status: Literal["READONLY", "INDEXING", "READY", "LAZY_LOADING"]
vector_queue_length: int
compressed: bool
loaded: Optional[bool] # not present in <1.24.x
@dataclass
|
Shard
|
python
|
getsentry__sentry
|
src/sentry/preprod/api/endpoints/project_preprod_check_for_updates.py
|
{
"start": 879,
"end": 1072
}
|
class ____(BaseModel):
id: str
build_version: str
build_number: int
release_notes: str | None
download_url: str
app_name: str
created_date: str
|
InstallableBuildDetails
|
python
|
falconry__falcon
|
falcon/bench/queues/api.py
|
{
"start": 1010,
"end": 2600
}
|
class ____:
def __init__(self, body, headers):
self._body = body
self._headers = headers
def process_response(self, req, resp, resource, req_succeeded):
user_agent = req.user_agent # NOQA
limit = req.get_param('limit') or '10' # NOQA
resp.status = falcon.HTTP_200
resp.data = self._body
resp.set_headers(self._headers)
resp.vary = ('X-Auth-Token', 'Accept-Encoding')
resp.content_range = (0, len(self._body), len(self._body) + 100)
def create(body, headers):
queue_collection = queues.CollectionResource()
queue_item = queues.ItemResource()
stats_endpoint = stats.Resource()
msg_collection = messages.CollectionResource()
msg_item = messages.ItemResource()
claim_collection = claims.CollectionResource()
claim_item = claims.ItemResource()
middleware = [
RequestIDComponent(),
CannedResponseComponent(body, headers),
]
api = falcon.App(middleware=middleware)
api.add_route('/v1/{tenant_id}/queues', queue_collection)
api.add_route('/v1/{tenant_id}/queues/{queue_name}', queue_item)
api.add_route('/v1/{tenant_id}/queues/{queue_name}/stats', stats_endpoint)
api.add_route('/v1/{tenant_id}/queues/{queue_name}/messages', msg_collection)
api.add_route('/v1/{tenant_id}/queues/{queue_name}/messages/{message_id}', msg_item)
api.add_route('/v1/{tenant_id}/queues/{queue_name}/claims', claim_collection)
api.add_route('/v1/{tenant_id}/queues/{queue_name}/claims/{claim_id}', claim_item)
return api
|
CannedResponseComponent
|
python
|
keras-team__keras
|
keras/src/metrics/regression_metrics.py
|
{
"start": 10365,
"end": 19756
}
|
class ____(reduction_metrics.Metric):
"""Computes R2 score.
Formula:
```python
sum_squares_residuals = sum((y_true - y_pred) ** 2)
sum_squares = sum((y_true - mean(y_true)) ** 2)
R2 = 1 - sum_squares_residuals / sum_squares
```
This is also called the
[coefficient of determination](
https://en.wikipedia.org/wiki/Coefficient_of_determination).
It indicates how close the fitted regression line
is to ground-truth data.
- The highest score possible is 1.0. It indicates that the predictors
      perfectly account for variation in the target.
- A score of 0.0 indicates that the predictors do not
account for variation in the target.
- It can also be negative if the model is worse than random.
This metric can also compute the "Adjusted R2" score.
Args:
class_aggregation: Specifies how to aggregate scores corresponding to
different output classes (or target dimensions),
i.e. different dimensions on the last axis of the predictions.
Equivalent to `multioutput` argument in Scikit-Learn.
Should be one of
`None` (no aggregation), `"uniform_average"`,
`"variance_weighted_average"`.
num_regressors: Number of independent regressors used
("Adjusted R2" score). 0 is the standard R2 score.
Defaults to `0`.
name: Optional. string name of the metric instance.
dtype: Optional. data type of the metric result.
Example:
>>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
>>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
>>> metric = keras.metrics.R2Score()
>>> metric.update_state(y_true, y_pred)
>>> result = metric.result()
>>> result
0.57142854
"""
def __init__(
self,
class_aggregation="uniform_average",
num_regressors=0,
name="r2_score",
dtype=None,
):
super().__init__(name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
valid_class_aggregation_values = (
None,
"uniform_average",
"variance_weighted_average",
)
if class_aggregation not in valid_class_aggregation_values:
raise ValueError(
"Invalid value for argument `class_aggregation`. Expected "
f"one of {valid_class_aggregation_values}. "
f"Received: class_aggregation={class_aggregation}"
)
if num_regressors < 0:
raise ValueError(
"Invalid value for argument `num_regressors`. "
"Expected a value >= 0. "
f"Received: num_regressors={num_regressors}"
)
self.class_aggregation = class_aggregation
self.num_regressors = num_regressors
self.num_samples = self.add_variable(
shape=(),
initializer=initializers.Zeros(),
name="num_samples",
)
self._built = False
def _build(self, y_true_shape, y_pred_shape):
if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
raise ValueError(
"R2Score expects 2D inputs with shape "
"(batch_size, output_dim). Received input "
f"shapes: y_pred.shape={y_pred_shape} and "
f"y_true.shape={y_true_shape}."
)
if y_pred_shape[-1] is None or y_true_shape[-1] is None:
raise ValueError(
"R2Score expects 2D inputs with shape "
"(batch_size, output_dim), with output_dim fully "
"defined (not None). Received input "
f"shapes: y_pred.shape={y_pred_shape} and "
f"y_true.shape={y_true_shape}."
)
num_classes = y_pred_shape[-1]
self.squared_sum = self.add_variable(
name="squared_sum",
shape=[num_classes],
initializer=initializers.Zeros(),
)
self.sum = self.add_variable(
name="sum",
shape=[num_classes],
initializer=initializers.Zeros(),
)
self.total_mse = self.add_variable(
name="residual",
shape=[num_classes],
initializer=initializers.Zeros(),
)
self.count = self.add_variable(
name="count",
shape=[num_classes],
initializer=initializers.Zeros(),
)
self._built = True
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
Defaults to `1`.
Returns:
Update op.
"""
y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)
y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
if not self._built:
self._build(y_true.shape, y_pred.shape)
if sample_weight is None:
sample_weight = 1
sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)
if len(sample_weight.shape) == 1:
# Make sure there's a features dimension
sample_weight = ops.expand_dims(sample_weight, axis=1)
sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))
weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype)
self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))
self.squared_sum.assign(
self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0)
)
self.total_mse.assign(
self.total_mse
+ ops.sum(
(y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype),
axis=0,
)
)
self.count.assign(self.count + ops.sum(sample_weight, axis=0))
self.num_samples.assign(self.num_samples + ops.size(y_true))
def result(self):
mean = self.sum / self.count
total = self.squared_sum - self.sum * mean
raw_scores = 1 - (self.total_mse / total)
raw_scores = ops.where(ops.isinf(raw_scores), 0.0, raw_scores)
if self.class_aggregation == "uniform_average":
r2_score = ops.mean(raw_scores)
elif self.class_aggregation == "variance_weighted_average":
weighted_sum = ops.sum(total * raw_scores)
sum_of_weights = ops.sum(total)
r2_score = weighted_sum / sum_of_weights
else:
r2_score = raw_scores
if self.num_regressors != 0:
if self.num_regressors > self.num_samples - 1:
warnings.warn(
"More independent predictors than datapoints "
"in adjusted R2 score. Falling back to standard R2 score.",
stacklevel=2,
)
elif self.num_regressors == self.num_samples - 1:
warnings.warn(
"Division by zero in Adjusted R2 score. "
"Falling back to standard R2 score.",
stacklevel=2,
)
else:
n = ops.convert_to_tensor(self.num_samples, dtype="float32")
p = ops.convert_to_tensor(self.num_regressors, dtype="float32")
num = ops.multiply(
ops.subtract(1.0, r2_score), ops.subtract(n, 1.0)
)
den = ops.subtract(ops.subtract(n, p), 1.0)
r2_score = ops.subtract(1.0, ops.divide(num, den))
return r2_score
def reset_state(self):
for v in self.variables:
v.assign(ops.zeros(v.shape, dtype=v.dtype))
def get_config(self):
config = {
"name": self.name,
"dtype": self.dtype,
"class_aggregation": self.class_aggregation,
"num_regressors": self.num_regressors,
}
base_config = super().get_config()
return {**base_config, **config}
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Formula:
```python
loss = sum(l2_norm(y_true) * l2_norm(y_pred))
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity. Defaults to `-1`.
Returns:
Cosine similarity tensor.
Example:
>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
[0., 0.99999994, -0.99999994]
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
y_pred = normalize(y_pred, axis=axis)
y_true = normalize(y_true, axis=axis)
return ops.sum(y_true * y_pred, axis=axis)
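# Formula check sketch (illustrative, pure NumPy), mirroring the R2Score
# docstring example above: R2 = 1 - SS_res / SS_tot.
def _demo_r2_by_hand():
    import numpy as np
    y_true = np.array([1.0, 4.0, 3.0])
    y_pred = np.array([2.0, 4.0, 4.0])
    ss_res = np.sum((y_true - y_pred) ** 2)          # 1 + 0 + 1 = 2
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)   # mean 8/3 -> 14/3
    assert np.isclose(1.0 - ss_res / ss_tot, 0.57142854)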
|
R2Score
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_labels.py
|
{
"start": 1474,
"end": 16901
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "DefaultDialect"
    __backend__ = True
table1 = table(
"some_large_named_table",
column("this_is_the_primarykey_column"),
column("this_is_the_data_column"),
)
table2 = table(
"table_with_exactly_29_characs",
column("this_is_the_primarykey_column"),
column("this_is_the_data_column"),
)
def _length_fixture(self, length=IDENT_LENGTH, positional=False):
dialect = default.DefaultDialect()
dialect.max_identifier_length = (
dialect._user_defined_max_identifier_length
) = length
if positional:
dialect.paramstyle = "format"
dialect.positional = True
return dialect
def _engine_fixture(self, length=IDENT_LENGTH):
eng = engines.testing_engine()
eng.dialect.max_identifier_length = (
eng.dialect._user_defined_max_identifier_length
) = length
return eng
def test_label_length_raise_too_large(self):
max_ident_length = testing.db.dialect.max_identifier_length
eng = engines.testing_engine(
options={"label_length": max_ident_length + 10}
)
assert_raises_message(
exceptions.ArgumentError,
"Label length of %d is greater than this dialect's maximum "
"identifier length of %d"
% (max_ident_length + 10, max_ident_length),
eng.connect,
)
def test_label_length_custom_maxlen(self):
max_ident_length = testing.db.dialect.max_identifier_length
eng = engines.testing_engine(
options={
"label_length": max_ident_length + 10,
"max_identifier_length": max_ident_length + 20,
}
)
with eng.connect() as conn:
eq_(conn.dialect.max_identifier_length, max_ident_length + 20)
def test_label_length_custom_maxlen_dialect_only(self):
dialect = default.DefaultDialect(max_identifier_length=47)
eq_(dialect.max_identifier_length, 47)
def test_label_length_custom_maxlen_user_set_manually(self):
eng = engines.testing_engine()
eng.dialect.max_identifier_length = 47
# assume the dialect has no on-connect change
with mock.patch.object(
eng.dialect,
"_check_max_identifier_length",
side_effect=lambda conn: None,
):
with eng.connect():
pass
# it was maintained
eq_(eng.dialect.max_identifier_length, 47)
def test_label_length_too_large_custom_maxlen(self):
max_ident_length = testing.db.dialect.max_identifier_length
eng = engines.testing_engine(
options={
"label_length": max_ident_length - 10,
"max_identifier_length": max_ident_length - 20,
}
)
assert_raises_message(
exceptions.ArgumentError,
"Label length of %d is greater than this dialect's maximum "
"identifier length of %d"
% (max_ident_length - 10, max_ident_length - 20),
eng.connect,
)
def test_custom_max_identifier_length(self):
max_ident_length = testing.db.dialect.max_identifier_length
eng = engines.testing_engine(
options={"max_identifier_length": max_ident_length + 20}
)
with eng.connect() as conn:
eq_(conn.dialect.max_identifier_length, max_ident_length + 20)
def test_max_identifier_length_onconnect(self):
eng = engines.testing_engine()
def _check_max_identifer_length(conn):
return 47
with mock.patch.object(
eng.dialect,
"_check_max_identifier_length",
side_effect=_check_max_identifer_length,
) as mock_:
with eng.connect():
eq_(eng.dialect.max_identifier_length, 47)
eq_(mock_.mock_calls, [mock.call(mock.ANY)])
def test_max_identifier_length_onconnect_returns_none(self):
eng = engines.testing_engine()
max_ident_length = eng.dialect.max_identifier_length
def _check_max_identifer_length(conn):
return None
with mock.patch.object(
eng.dialect,
"_check_max_identifier_length",
side_effect=_check_max_identifer_length,
) as mock_:
with eng.connect():
eq_(eng.dialect.max_identifier_length, max_ident_length)
eq_(mock_.mock_calls, [mock.call(mock.ANY)])
def test_custom_max_identifier_length_onconnect(self):
eng = engines.testing_engine(options={"max_identifier_length": 49})
def _check_max_identifer_length(conn):
return 47
with mock.patch.object(
eng.dialect,
"_check_max_identifier_length",
side_effect=_check_max_identifer_length,
) as mock_:
with eng.connect():
eq_(eng.dialect.max_identifier_length, 49)
eq_(mock_.mock_calls, []) # was not called
def test_table_alias_1(self):
self.assert_compile(
self.table2.alias().select(),
"SELECT "
"table_with_exactly_29_c_1."
"this_is_the_primarykey_column, "
"table_with_exactly_29_c_1.this_is_the_data_column "
"FROM "
"table_with_exactly_29_characs "
"AS table_with_exactly_29_c_1",
dialect=self._length_fixture(),
)
def test_table_alias_2(self):
table1 = self.table1
table2 = self.table2
ta = table2.alias()
on = table1.c.this_is_the_data_column == ta.c.this_is_the_data_column
self.assert_compile(
select(table1, ta)
.select_from(table1.join(ta, on))
.where(ta.c.this_is_the_data_column == "data3")
.set_label_style(LABEL_STYLE_NONE),
"SELECT "
"some_large_named_table.this_is_the_primarykey_column, "
"some_large_named_table.this_is_the_data_column, "
"table_with_exactly_29_c_1.this_is_the_primarykey_column, "
"table_with_exactly_29_c_1.this_is_the_data_column "
"FROM "
"some_large_named_table "
"JOIN "
"table_with_exactly_29_characs "
"AS "
"table_with_exactly_29_c_1 "
"ON "
"some_large_named_table.this_is_the_data_column = "
"table_with_exactly_29_c_1.this_is_the_data_column "
"WHERE "
"table_with_exactly_29_c_1.this_is_the_data_column = "
":this_is_the_data_column_1",
dialect=self._length_fixture(),
)
def test_too_long_name_disallowed(self):
m = MetaData()
t = Table(
"this_name_is_too_long_for_what_were_doing_in_this_test",
m,
Column("foo", Integer),
)
eng = self._engine_fixture()
methods = (t.create, t.drop, m.create_all, m.drop_all)
for meth in methods:
assert_raises(exceptions.IdentifierError, meth, eng)
def _assert_labeled_table1_select(self, s):
table1 = self.table1
compiled = s.compile(dialect=self._length_fixture())
assert set(
compiled._create_result_map()["some_large_named_table__2"][1]
).issuperset(
[
"some_large_named_table_this_is_the_data_column",
"some_large_named_table__2",
table1.c.this_is_the_data_column,
]
)
assert set(
compiled._create_result_map()["some_large_named_table__1"][1]
).issuperset(
[
"some_large_named_table_this_is_the_primarykey_column",
"some_large_named_table__1",
table1.c.this_is_the_primarykey_column,
]
)
def test_result_map_use_labels(self):
table1 = self.table1
s = (
table1.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by(table1.c.this_is_the_primarykey_column)
)
self._assert_labeled_table1_select(s)
def test_result_map_limit(self):
table1 = self.table1
# some dialects such as oracle (and possibly ms-sql in a future
# version) generate a subquery for limits/offsets. ensure that the
# generated result map corresponds to the selected table, not the
# select query
s = (
table1.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by(table1.c.this_is_the_primarykey_column)
.limit(2)
)
self._assert_labeled_table1_select(s)
def test_result_map_subquery(self):
table1 = self.table1
s = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias("foo")
)
s2 = select(s)
compiled = s2.compile(dialect=self._length_fixture())
assert set(
compiled._create_result_map()["this_is_the_data_column"][1]
).issuperset(["this_is_the_data_column", s.c.this_is_the_data_column])
assert set(
compiled._create_result_map()["this_is_the_primarykey__1"][1]
).issuperset(
[
"this_is_the_primarykey_column",
"this_is_the_primarykey__1",
s.c.this_is_the_primarykey_column,
]
)
def test_result_map_anon_alias(self):
table1 = self.table1
dialect = self._length_fixture()
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias()
)
s = select(q).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
s,
"SELECT "
"anon_1.this_is_the_primarykey__2 AS anon_1_this_is_the_prim_1, "
"anon_1.this_is_the_data_column AS anon_1_this_is_the_data_3 "
"FROM ("
"SELECT "
"some_large_named_table."
"this_is_the_primarykey_column AS this_is_the_primarykey__2, "
"some_large_named_table."
"this_is_the_data_column AS this_is_the_data_column "
"FROM "
"some_large_named_table "
"WHERE "
"some_large_named_table.this_is_the_primarykey_column "
"= :this_is_the_primarykey__1"
") "
"AS anon_1",
dialect=dialect,
)
compiled = s.compile(dialect=dialect)
assert set(
compiled._create_result_map()["anon_1_this_is_the_data_3"][1]
).issuperset(
[
"anon_1_this_is_the_data_3",
q.corresponding_column(table1.c.this_is_the_data_column),
]
)
assert set(
compiled._create_result_map()["anon_1_this_is_the_prim_1"][1]
).issuperset(
[
"anon_1_this_is_the_prim_1",
q.corresponding_column(table1.c.this_is_the_primarykey_column),
]
)
def test_column_bind_labels_1(self):
table1 = self.table1
s = table1.select().where(table1.c.this_is_the_primarykey_column == 4)
self.assert_compile(
s,
"SELECT some_large_named_table.this_is_the_primarykey_column, "
"some_large_named_table.this_is_the_data_column "
"FROM some_large_named_table WHERE "
"some_large_named_table.this_is_the_primarykey_column = "
":this_is_the_primarykey__1",
checkparams={"this_is_the_primarykey__1": 4},
dialect=self._length_fixture(),
)
self.assert_compile(
s,
"SELECT some_large_named_table.this_is_the_primarykey_column, "
"some_large_named_table.this_is_the_data_column "
"FROM some_large_named_table WHERE "
"some_large_named_table.this_is_the_primarykey_column = "
"%s",
checkpositional=(4,),
checkparams={"this_is_the_primarykey__1": 4},
dialect=self._length_fixture(positional=True),
)
def test_column_bind_labels_2(self):
table1 = self.table1
s = table1.select().where(
or_(
table1.c.this_is_the_primarykey_column == 4,
table1.c.this_is_the_primarykey_column == 2,
)
)
self.assert_compile(
s,
"SELECT some_large_named_table.this_is_the_primarykey_column, "
"some_large_named_table.this_is_the_data_column "
"FROM some_large_named_table WHERE "
"some_large_named_table.this_is_the_primarykey_column = "
":this_is_the_primarykey__1 OR "
"some_large_named_table.this_is_the_primarykey_column = "
":this_is_the_primarykey__2",
checkparams={
"this_is_the_primarykey__1": 4,
"this_is_the_primarykey__2": 2,
},
dialect=self._length_fixture(),
)
self.assert_compile(
s,
"SELECT some_large_named_table.this_is_the_primarykey_column, "
"some_large_named_table.this_is_the_data_column "
"FROM some_large_named_table WHERE "
"some_large_named_table.this_is_the_primarykey_column = "
"%s OR "
"some_large_named_table.this_is_the_primarykey_column = "
"%s",
checkparams={
"this_is_the_primarykey__1": 4,
"this_is_the_primarykey__2": 2,
},
checkpositional=(4, 2),
dialect=self._length_fixture(positional=True),
)
def test_bind_param_non_truncated(self):
table1 = self.table1
stmt = table1.insert().values(
this_is_the_data_column=bindparam(
"this_is_the_long_bindparam_name"
)
)
compiled = stmt.compile(dialect=self._length_fixture(length=10))
eq_(
compiled.construct_params(
params={"this_is_the_long_bindparam_name": 5}
),
{"this_is_the_long_bindparam_name": 5},
)
def test_bind_param_truncated_named(self):
table1 = self.table1
bp = bindparam(_truncated_label("this_is_the_long_bindparam_name"))
stmt = table1.insert().values(this_is_the_data_column=bp)
compiled = stmt.compile(dialect=self._length_fixture(length=10))
eq_(
compiled.construct_params(
params={"this_is_the_long_bindparam_name": 5}
),
{"this_1": 5},
)
def test_bind_param_truncated_positional(self):
table1 = self.table1
bp = bindparam(_truncated_label("this_is_the_long_bindparam_name"))
stmt = table1.insert().values(this_is_the_data_column=bp)
compiled = stmt.compile(
dialect=self._length_fixture(length=10, positional=True)
)
eq_(
compiled.construct_params(
params={"this_is_the_long_bindparam_name": 5}
),
{"this_1": 5},
)
|
MaxIdentTest
|
python
|
sympy__sympy
|
sympy/functions/elementary/hyperbolic.py
|
{
"start": 36642,
"end": 42137
}
|
class ____(InverseHyperbolicFunction):
"""
``asinh(x)`` is the inverse hyperbolic sine of ``x``.
The inverse hyperbolic sine function.
Examples
========
>>> from sympy import asinh
>>> from sympy.abc import x
>>> asinh(x).diff(x)
1/sqrt(x**2 + 1)
>>> asinh(1)
log(1 + sqrt(2))
See Also
========
sympy.functions.elementary.hyperbolic.acosh
sympy.functions.elementary.hyperbolic.atanh
sympy.functions.elementary.hyperbolic.sinh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg.is_zero:
return S.Zero
elif arg is S.One:
return log(sqrt(2) + 1)
elif arg is S.NegativeOne:
return log(sqrt(2) - 1)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.ComplexInfinity
if arg.is_zero:
return S.Zero
i_coeff = _imaginary_unit_as_coefficient(arg)
if i_coeff is not None:
return I * asin(i_coeff)
else:
if arg.could_extract_minus_sign():
return -cls(-arg)
if isinstance(arg, sinh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
return z
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor((i + pi/2)/pi)
m = z - I*pi*f
even = f.is_even
if even is True:
return m
elif even is False:
return -m
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return -p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return S.NegativeOne**k * R / F * x**n / n
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
if x0.is_zero:
return arg.as_leading_term(x)
if x0 is S.NaN:
expr = self.func(arg.as_leading_term(x))
if expr.is_finite:
return expr
else:
return self
# Handling branch points
if x0 in (-I, I, S.ComplexInfinity):
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
# Handling points lying on branch cuts (-I*oo, -I) U (I, I*oo)
if (1 + x0**2).is_negative:
ndir = arg.dir(x, cdir if cdir else 1)
if re(ndir).is_positive:
if im(x0).is_negative:
return -self.func(x0) - I*pi
elif re(ndir).is_negative:
if im(x0).is_positive:
return -self.func(x0) + I*pi
else:
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
return self.func(x0)
def _eval_nseries(self, x, n, logx, cdir=0): # asinh
arg = self.args[0]
arg0 = arg.subs(x, 0)
# Handling branch points
if arg0 in (I, -I):
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
res = super()._eval_nseries(x, n=n, logx=logx)
if arg0 is S.ComplexInfinity:
return res
# Handling points lying on branch cuts (-I*oo, -I) U (I, I*oo)
if (1 + arg0**2).is_negative:
ndir = arg.dir(x, cdir if cdir else 1)
if re(ndir).is_positive:
if im(arg0).is_negative:
return -res - I*pi
elif re(ndir).is_negative:
if im(arg0).is_positive:
return -res + I*pi
else:
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
return res
def _eval_rewrite_as_log(self, x, **kwargs):
return log(x + sqrt(x**2 + 1))
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def _eval_rewrite_as_atanh(self, x, **kwargs):
return atanh(x/sqrt(1 + x**2))
def _eval_rewrite_as_acosh(self, x, **kwargs):
ix = I*x
return I*(sqrt(1 - ix)/sqrt(ix - 1) * acosh(ix) - pi/2)
def _eval_rewrite_as_asin(self, x, **kwargs):
return -I * asin(I * x, evaluate=False)
def _eval_rewrite_as_acos(self, x, **kwargs):
return I * acos(I * x, evaluate=False) - I*pi/2
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sinh
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_is_extended_real(self):
return self.args[0].is_extended_real
def _eval_is_finite(self):
return self.args[0].is_finite
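# --- Editorial usage sketch (an addition, not part of the class above) ---
# A minimal check of the special values and rewrites implemented by eval()
# and _eval_rewrite_as_log(), assuming a standard sympy installation.
from sympy import Symbol, asinh, log, oo, sqrt
_x = Symbol("x", positive=True)
assert asinh(0) == 0                          # arg.is_zero branch
assert asinh(1) == log(sqrt(2) + 1)           # S.One branch
assert asinh(oo) is oo                        # S.Infinity branch
assert asinh(-_x) == -asinh(_x)               # odd-function reduction
assert asinh(_x).rewrite(log) == log(_x + sqrt(_x**2 + 1))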
|
asinh
|
python
|
bokeh__bokeh
|
src/bokeh/models/tools.py
|
{
"start": 64744,
"end": 66078
}
|
class ____(GestureTool):
''' A base class for all interactive draw tool types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
default_overrides = Dict(String, AnyRef, default={}, help="""
Padding values overriding ``ColumnarDataSource.default_values``.
Defines values to insert into non-coordinate columns when a new glyph is
added to a ``ColumnDataSource``. For example, when a circle glyph defines
``"x"``, ``"y"``, and ``"color"`` columns, adding a new point appends the
x- and y-coordinates to the ``"x"`` and ``"y"`` columns, and the ``"color"``
column is filled with the configured default value.
""")
empty_value = Either(Bool, Int, Float, Date, Datetime, Color, String, default=0, help="""
The "last resort" padding value.
This is used in the same way as ``default_values``, when the tool is unable
to determine a default value otherwise. The tool tries the following
alternatives in order:
1. ``EditTool.default_overrides``
2. ``ColumnarDataSource.default_values``
3. ``ColumnarDataSource``'s inferred default values
4. ``EditTool.empty_value``
""")
# TODO abstract renderers = List(Instance(GlyphRenderer & ...))
@abstract
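# --- Editorial note (hedged sketch; the concrete names below are illustrative) ---
# Concrete draw tools such as PointDrawTool inherit these properties; when a
# new glyph is added, padding for non-coordinate columns is resolved in the
# documented order, e.g.:
#   from bokeh.models import ColumnDataSource, PointDrawTool
#   source = ColumnDataSource(data=dict(x=[0], y=[0], color=["red"]))
#   tool = PointDrawTool(default_overrides={"color": "blue"},  # step 1
#                        empty_value="gray")                   # step 4 fallback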
|
EditTool
|
python
|
scikit-image__scikit-image
|
tests/skimage/io/test_collection.py
|
{
"start": 1773,
"end": 5357
}
|
class ____:
pics = [fetch('data/brick.png'), fetch('data/color.png'), fetch('data/moon.png')]
pattern = pics[:2]
pattern_same_shape = pics[::2]
def setup_method(self):
reset_plugins()
# Generic image collection with images of different shapes.
self.images = ImageCollection(self.pattern)
# Image collection with images having shapes that match.
self.images_matched = ImageCollection(self.pattern_same_shape)
# Same images as a collection of frames
self.frames_matched = MultiImage(self.pattern_same_shape)
def test_len(self):
assert len(self.images) == 2
def test_getitem(self):
num = len(self.images)
for i in range(-num, num):
assert isinstance(self.images[i], np.ndarray)
assert_allclose(self.images[0], self.images[-num])
def return_img(n):
return self.images[n]
with testing.raises(IndexError):
return_img(num)
with testing.raises(IndexError):
return_img(-num - 1)
def test_slicing(self):
assert type(self.images[:]) is ImageCollection
assert len(self.images[:]) == 2
assert len(self.images[:1]) == 1
assert len(self.images[1:]) == 1
assert_allclose(self.images[0], self.images[:1][0])
assert_allclose(self.images[1], self.images[1:][0])
assert_allclose(self.images[1], self.images[::-1][0])
assert_allclose(self.images[0], self.images[::-1][1])
def test_files_property(self):
assert isinstance(self.images.files, list)
def set_files(f):
self.images.files = f
with testing.raises(AttributeError):
set_files('newfiles')
@pytest.mark.skipif(not has_pooch, reason="needs pooch to download data")
def test_custom_load_func_sequence(self):
filename = fetch('data/no_time_for_that_tiny.gif')
def reader(index):
return iio3.imread(filename, index=index)
ic = ImageCollection(range(24), load_func=reader)
# the length of ic should be that of the given load_pattern sequence
assert len(ic) == 24
# GIF frames are 25x14 with 3 color channels, matching the asserted shape
assert ic[0].shape == (25, 14, 3)
@pytest.mark.skipif(not has_pooch, reason="needs pooch to download data")
def test_custom_load_func_w_kwarg(self):
load_pattern = fetch('data/no_time_for_that_tiny.gif')
def load_fn(f, step):
vid = iio3.imiter(f)
return list(itertools.islice(vid, None, None, step))
ic = ImageCollection(load_pattern, load_func=load_fn, step=3)
# Each file should map to one image (array).
assert len(ic) == 1
# GIF file has 24 frames, so 24 / 3 equals 8.
assert len(ic[0]) == 8
def test_custom_load_func(self):
def load_fn(x):
return x
ic = ImageCollection(os.pathsep.join(self.pattern), load_func=load_fn)
assert_equal(ic[0], self.pattern[0])
def test_concatenate(self):
array = self.images_matched.concatenate()
expected_shape = (len(self.images_matched),) + self.images[0].shape
assert_equal(array.shape, expected_shape)
def test_concatenate_mismatched_image_shapes(self):
with testing.raises(ValueError):
self.images.concatenate()
def test_multiimage_imagecollection(self):
assert_equal(self.images_matched[0], self.frames_matched[0])
assert_equal(self.images_matched[1], self.frames_matched[1])
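# --- Editorial usage sketch (hedged; the glob path below is hypothetical) ---
# Mirrors the behaviors asserted above: lazy loading, slicing that returns a
# new ImageCollection, and concatenate() requiring uniformly shaped frames.
#   from skimage.io import ImageCollection
#   ic = ImageCollection("images/*.png")   # files resolved via glob, loaded lazily
#   first = ic[0]                          # image read on first access
#   sub = ic[:2]                           # slicing -> new ImageCollection
#   stack = sub.concatenate()              # ndarray; ValueError on mismatched shapes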
|
TestImageCollection
|
python
|
pyparsing__pyparsing
|
tests/test_unit.py
|
{
"start": 7129,
"end": 390869
}
|
class ____(ppt.TestParseResultsAsserts, TestCase):
suite_context = None
save_suite_context = None
def setUp(self):
self.suite_context.restore()
def test000_assert_packrat_status(self):
print("Packrat enabled:", ParserElement._packratEnabled)
self.assertFalse(ParserElement._packratEnabled, "packrat enabled")
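# (Editorial note, hedged: packrat memoization is opt-in via
# pp.ParserElement.enable_packrat(); this suite context runs with it
# disabled, and the memoization-sensitive tests below skip themselves when
# packrat or left-recursion support is enabled.)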
def testScanStringWithOverlap(self):
parser = pp.Word(pp.alphas, exact=3)
without_overlaps = sum(t for t, s, e in parser.scan_string("ABCDEFGHI")).as_list()
self.assertEqual(
["ABC", "DEF", "GHI"],
without_overlaps,
msg="scan_string without overlaps failed",
)
with_overlaps = sum(
t for t, s, e in parser.scan_string("ABCDEFGHI", overlap=True)
).as_list()
self.assertEqual(
["ABC", "BCD", "CDE", "DEF", "EFG", "FGH", "GHI"],
with_overlaps,
msg="scan_string with overlaps failed",
)
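# (Editorial note, hedged: sum() over the scanned tokens works because
# ParseResults implements __add__ and __radd__ (0 + ParseResults returns a
# copy), so the per-match tokens fold into one ParseResults before
# .as_list() is called.)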
def testCombineWithResultsNames(self):
# test case reproducing Issue #350
from pyparsing import White, alphas, Word
parser = White(" \t").set_results_name("indent") + Word(
alphas
).set_results_name("word")
result = parser.parse_string(" test")
print(result.dump())
self.assertParseResultsEquals(
result, [" ", "test"], {"indent": " ", "word": "test"}
)
parser = White(" \t") + Word(alphas).set_results_name("word")
result = parser.parse_string(" test")
print(result.dump())
self.assertParseResultsEquals(result, [" ", "test"], {"word": "test"})
def testTransformString(self):
make_int_with_commas = ppc.integer().add_parse_action(lambda t: f"{t[0]:,}")
lower_case_words = pp.Word(pp.alphas.lower(), as_keyword=True) + pp.Optional(
pp.White()
)
nested_list = pp.nested_expr().add_parse_action(pp.ParseResults.as_list)
transformer = make_int_with_commas | nested_list | lower_case_words.suppress()
in_string = (
"I wish to buy 12345 shares of Acme Industries (as a gift to my (ex)wife)"
)
print(in_string)
out_string = transformer.transform_string(in_string)
print(out_string)
self.assertEqual(
"I 12,345 Acme Industries asagifttomyexwife",
out_string,
msg="failure in transform_string",
)
def testTransformStringWithLeadingWhitespace(self):
# sample = "\n\ncheck"  # alternate leading-whitespace sample; superseded below
sample = " check"
keywords = pp.one_of("aaa bbb", as_keyword=True)
# ident = ~keywords + pp.Word(pp.alphas)  # non-Combine variant; superseded below
ident = pp.Combine(~keywords + pp.Word(pp.alphas))
# ident.add_parse_action(lambda t: t[0].upper())
ident.add_parse_action(ppc.upcase_tokens)
transformed = ident.transform_string(sample)
print(ppt.with_line_numbers(sample))
print(ppt.with_line_numbers(transformed))
self.assertEqual(sample.replace("check", "CHECK"), transformed)
def testTransformStringWithLeadingNotAny(self):
sample = "print a100"
keywords = set("print read".split())
ident = pp.Word(pp.alphas, pp.alphanums).add_condition(
lambda t: t[0] not in keywords
)
print(ident.search_string(sample))
def testTransformStringWithExpectedLeadingWhitespace(self):
sample1 = "\n\ncheck aaa"
sample2 = " check aaa"
keywords = pp.one_of("aaa bbb", as_keyword=True)
# This construct only works with parse_string, not with scan_string or its siblings
# ident = ~keywords + pp.Word(pp.alphas)
ident = pp.Word(pp.alphas)
ident.add_parse_action(ppc.upcase_tokens)
for sample in sample1, sample2:
transformed = (keywords | ident).transform_string(sample)
print(ppt.with_line_numbers(sample))
print(ppt.with_line_numbers(transformed))
self.assertEqual(sample.replace("check", "CHECK"), transformed)
print()
def testTransformStringWithLeadingWhitespaceFromTranslateProject(self):
from pyparsing import Keyword, Word, alphas, alphanums, Combine
block_start = (Keyword("{") | Keyword("BEGIN")).set_name("block_start")
block_end = (Keyword("}") | Keyword("END")).set_name("block_end")
reserved_words = block_start | block_end
# this is the first critical part of this test, an And with a leading NotAny
# This construct only works with parse_string, not with scan_string or its siblings
# name_id = ~reserved_words + Word(alphas, alphanums + "_").set_name("name_id")
name_id = Word(alphas, alphanums + "_").set_name("name_id")
dialog = name_id("block_id") + (Keyword("DIALOGEX") | Keyword("DIALOG"))(
"block_type"
)
string_table = Keyword("STRINGTABLE")("block_type")
test_string = (
"""\r\nSTRINGTABLE\r\nBEGIN\r\n// Comment\r\nIDS_1 "Copied"\r\nEND\r\n"""
)
print("Original:")
print(repr(test_string))
print("Should match:")
# this is the second critical part of this test, an Or or MatchFirst including dialog
for parser in (dialog ^ string_table, dialog | string_table):
result = (reserved_words | parser).transform_string(test_string)
print(repr(result))
self.assertEqual(
test_string,
result,
"Failed whitespace skipping with NotAny and MatchFirst/Or",
)
def testCuneiformTransformString(self):
class Cuneiform(pp.unicode_set):
"""Unicode set for Cuneiform Character Range"""
_ranges: list[tuple[int, ...]] = [
(0x10380, 0x103d5),
(0x12000, 0x123FF),
(0x12400, 0x1247F),
]
# define a MINIMAL Python parser
LPAR, RPAR, COLON, EQ = map(pp.Suppress, "():=")
def_ = pp.Keyword("𒁴𒈫", ident_chars=Cuneiform.identbodychars).set_name("def")
any_keyword = def_
ident = (~any_keyword) + pp.Word(
Cuneiform.identchars, Cuneiform.identbodychars, as_keyword=True
)
str_expr = pp.infix_notation(
pp.QuotedString('"') | pp.common.integer,
[
("*", 2, pp.OpAssoc.LEFT),
("+", 2, pp.OpAssoc.LEFT),
],
)
rvalue = pp.Forward()
fn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name("fn_call")
rvalue <<= fn_call | ident | str_expr | pp.common.number
assignment_stmt = ident + EQ + rvalue
stmt = pp.Group(fn_call | assignment_stmt).set_name("stmt")
fn_def = pp.Group(
def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR) + COLON
).set_name("fn_def")
fn_body = pp.IndentedBlock(stmt).set_name("fn_body")
fn_expr = pp.Group(fn_def + pp.Group(fn_body))
script = fn_expr[...] + stmt[...]
# parse some Python written in Cuneiform
cuneiform_hello_world = dedent(r"""
𒁴𒈫 𒀄𒂖𒆷𒁎():
𒀁 = "𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\n" * 3
𒄑𒉿𒅔𒋫(𒀁)
𒀄𒂖𒆷𒁎()
""")
# use transform_string to convert keywords and builtins to runnable Python
names_map = {
"𒄑𒉿𒅔𒋫": "print",
}
ident.add_parse_action(lambda t: names_map.get(t[0], t[0]))
def_.add_parse_action(lambda: "def")
print("\nconvert Cuneiform Python to executable Python")
transformed = (
# always put ident last
(def_ | ident)
.ignore(pp.quoted_string)
.transform_string(cuneiform_hello_world)
)
expected = dedent(r"""
def 𒀄𒂖𒆷𒁎():
𒀁 = "𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\n" * 3
print(𒀁)
𒀄𒂖𒆷𒁎()
""")
print(
"=================\n"
+ cuneiform_hello_world # .strip()
+ "\n=================\n"
+ transformed
+ "\n=================\n"
)
self.assertEqual(expected, transformed)
def testUpdateDefaultWhitespace(self):
prev_default_whitespace_chars = pp.ParserElement.DEFAULT_WHITE_CHARS
try:
pp.dbl_quoted_string.copyDefaultWhiteChars = False
pp.ParserElement.set_default_whitespace_chars(" \t")
self.assertEqual(
set(" \t"),
set(pp.sgl_quoted_string.whiteChars),
"set_default_whitespace_chars did not update sgl_quoted_string",
)
self.assertEqual(
set(prev_default_whitespace_chars),
set(pp.dbl_quoted_string.whiteChars),
"set_default_whitespace_chars updated dbl_quoted_string but should not",
)
finally:
pp.dbl_quoted_string.copyDefaultWhiteChars = True
pp.ParserElement.set_default_whitespace_chars(prev_default_whitespace_chars)
self.assertEqual(
set(prev_default_whitespace_chars),
set(pp.dbl_quoted_string.whiteChars),
"set_default_whitespace_chars updated dbl_quoted_string",
)
with ppt.reset_pyparsing_context():
pp.ParserElement.set_default_whitespace_chars(" \t")
self.assertNotEqual(
set(prev_default_whitespace_chars),
set(pp.dbl_quoted_string.whiteChars),
"set_default_whitespace_chars updated dbl_quoted_string but should not",
)
EOL = pp.LineEnd().suppress().set_name("EOL")
# Identifiers is a string + optional $
identifier = pp.Combine(pp.Word(pp.alphas) + pp.Optional("$"))
# Literals (number or double quoted string)
literal = ppc.number | pp.dbl_quoted_string
expression = literal | identifier
# expression.set_name("expression").set_debug()
# ppc.number.set_debug()
# ppc.integer.set_debug()
line_number = ppc.integer
# Keywords
PRINT = pp.CaselessKeyword("print")
print_stmt = PRINT - pp.ZeroOrMore(expression | ";")
statement = print_stmt
code_line = pp.Group(line_number + statement + EOL)
program = pp.ZeroOrMore(code_line)
test = """\
10 print 123;
20 print 234; 567;
30 print 890
"""
parsed_program = program.parse_string(test, parse_all=True)
print(parsed_program.dump())
self.assertEqual(
3,
len(parsed_program),
"failed to apply new whitespace chars to existing builtins",
)
def testUpdateDefaultWhitespace2(self):
with ppt.reset_pyparsing_context():
expr_tests = [
(pp.dbl_quoted_string, '"abc"'),
(pp.sgl_quoted_string, "'def'"),
(ppc.integer, "123"),
(ppc.number, "4.56"),
(ppc.identifier, "a_bc"),
]
NL = pp.LineEnd()
for expr, test_str in expr_tests:
parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
test_string = "\n".join([test_str] * 3)
result = parser.parse_string(test_string, parse_all=True)
print(result.dump())
self.assertEqual(1, len(result), f"failed {test_string!r}")
pp.ParserElement.set_default_whitespace_chars(" \t")
for expr, test_str in expr_tests:
parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
test_string = "\n".join([test_str] * 3)
result = parser.parse_string(test_string, parse_all=True)
print(result.dump())
self.assertEqual(3, len(result), f"failed {test_string!r}")
pp.ParserElement.set_default_whitespace_chars(" \n\t")
for expr, test_str in expr_tests:
parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
test_string = "\n".join([test_str] * 3)
result = parser.parse_string(test_string, parse_all=True)
print(result.dump())
self.assertEqual(1, len(result), f"failed {test_string!r}")
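# (Editorial sketch, hedged: the two methods above boil down to newline
# significance, e.g. pp.ParserElement.set_default_whitespace_chars(" \t")
# removes "\n" from the skipped characters, so Group(expr[1, ...])[1, ...]
# yields one group per line instead of one group for the whole input.)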
def testParseFourFn(self):
import examples.fourFn as fourFn
import math
def test(s, ans):
fourFn.exprStack[:] = []
results = fourFn.BNF().parse_string(s, parse_all=True)
try:
resultValue = fourFn.evaluate_stack(fourFn.exprStack)
except Exception:
self.assertIsNone(ans, f"exception raised for expression {s!r}")
else:
self.assertEqual(
ans,
resultValue,
f"failed to evaluate {s}, got {resultValue:f}",
)
print(s, "->", resultValue)
test("9", 9)
test("-9", -9)
test("--9", 9)
test("-E", -math.e)
test("9 + 3 + 5", 9 + 3 + 5)
test("9 + 3 / 11", 9 + 3.0 / 11)
test("(9 + 3)", (9 + 3))
test("(9+3) / 11", (9 + 3.0) / 11)
test("9 - 12 - 6", 9 - 12 - 6)
test("9 - (12 - 6)", 9 - (12 - 6))
test("2*3.14159", 2 * 3.14159)
test("3.1415926535*3.1415926535 / 10", 3.1415926535 * 3.1415926535 / 10)
test("PI * PI / 10", math.pi * math.pi / 10)
test("PI*PI/10", math.pi * math.pi / 10)
test("PI^2", math.pi**2)
test("round(PI^2)", round(math.pi**2))
test("6.02E23 * 8.048", 6.02e23 * 8.048)
test("e / 3", math.e / 3)
test("sin(PI/2)", math.sin(math.pi / 2))
test("10+sin(PI/4)^2", 10 + math.sin(math.pi / 4) ** 2)
test("trunc(E)", int(math.e))
test("trunc(-E)", int(-math.e))
test("round(E)", round(math.e))
test("round(-E)", round(-math.e))
test("E^PI", math.e**math.pi)
test("exp(0)", 1)
test("exp(1)", math.e)
test("2^3^2", 2**3**2)
test("(2^3)^2", (2**3) ** 2)
test("2^3+2", 2**3 + 2)
test("2^3+5", 2**3 + 5)
test("2^9", 2**9)
test("sgn(-2)", -1)
test("sgn(0)", 0)
test("sgn(0.1)", 1)
test("foo(0.1)", None)
test("round(E, 3)", round(math.e, 3))
test("round(PI^2, 3)", round(math.pi**2, 3))
test("sgn(cos(PI/4))", 1)
test("sgn(cos(PI/2))", 0)
test("sgn(cos(PI*3/4))", -1)
test("+(sgn(cos(PI/4)))", 1)
test("-(sgn(cos(PI/4)))", -1)
def testParseSQL(self):
# SQL parser uses packrat parsing, not compatible with LR
if ParserElement._left_recursion_enabled:
return
import examples.simpleSQL as simpleSQL
def test(s, num_expected_toks, expected_errloc=-1):
try:
sqlToks = flatten(
simpleSQL.simpleSQL.parse_string(s, parse_all=True).as_list()
)
print(s, sqlToks, len(sqlToks))
self.assertEqual(
num_expected_toks,
len(sqlToks),
f"invalid parsed tokens, expected {num_expected_toks}, found {len(sqlToks)} ({sqlToks})",
)
except ParseException as e:
if expected_errloc >= 0:
self.assertEqual(
expected_errloc,
e.loc,
f"expected error at {expected_errloc}, found at {e.loc}",
)
test("SELECT * from XYZZY, ABC", 6)
test("select * from SYS.XYZZY", 5)
test("Select A from Sys.dual", 5)
test("Select A,B,C from Sys.dual", 7)
test("Select A, B, C from Sys.dual", 7)
test("Select A, B, C from Sys.dual, Table2 ", 8)
test("Xelect A, B, C from Sys.dual", 0, 0)
test("Select A, B, C frox Sys.dual", 0, 15)
test("Select", 0, 6)
test("Select &&& frox Sys.dual", 0, 7)
test("Select A from Sys.dual where a in ('RED','GREEN','BLUE')", 12)
test(
"Select A from Sys.dual where a in ('RED','GREEN','BLUE') and b in (10,20,30)",
20,
)
test(
"Select A,b from table1,table2 where table1.id eq table2.id -- test out comparison operators",
10,
)
def testParseConfigFile(self):
from examples import configParse
def test(fnam, num_expected_toks, resCheckList):
print("Parsing", fnam, "...", end=" ")
with open(fnam) as infile:
iniFileLines = "\n".join(infile.read().splitlines())
iniData = configParse.inifile_BNF().parse_string(iniFileLines, parse_all=True)
print(len(flatten(iniData.as_list())))
print(list(iniData.keys()))
self.assertEqual(
num_expected_toks,
len(flatten(iniData.as_list())),
f"file {fnam} not parsed correctly",
)
for chkkey, chkexpect in resCheckList:
var = iniData
for attr in chkkey.split("."):
var = getattr(var, attr)
print(chkkey, var, chkexpect)
self.assertEqual(
chkexpect,
var,
f"ParseConfigFileTest: failed to parse ini {chkkey!r} as expected {chkexpect!r}, found {var}",
)
print("OK")
test(
"tests/karthik.ini",
23,
[("users.K", "8"), ("users.mod_scheme", "'QPSK'"), ("users.Na", "K+2")],
)
test(
"examples/Setup.ini",
125,
[
("Startup.audioinf", "M3i"),
("Languages.key1", "0x0003"),
("test.foo", "bar"),
],
)
def testParseJSONData(self):
expected = [
{
"glossary": {
"GlossDiv": {
"GlossList": [
{
"Abbrev": "ISO 8879:1986",
"Acronym": "SGML",
"AvogadroNumber": 6.02e23,
"EmptyDict": {},
"EmptyList": [],
"EvenPrimesGreaterThan2": [],
"FermatTheoremInMargin": False,
"GlossDef": "A meta-markup language, "
"used to create markup "
"languages such as "
"DocBook.",
"GlossSeeAlso": ["GML", "XML", "markup"],
"GlossTerm": "Standard Generalized " "Markup Language",
"ID": "SGML",
"LargestPrimeLessThan100": 97,
"MapRequiringFiveColors": None,
"PrimesLessThan10": [2, 3, 5, 7],
"SortAs": "SGML",
}
],
"title": "S",
},
"title": "example glossary",
}
},
{
"menu": {
"id": "file",
"popup": {
"menuitem": [
{"onclick": "CreateNewDoc()", "value": "New"},
{"onclick": "OpenDoc()", "value": "Open"},
{"onclick": "CloseDoc()", "value": "Close"},
]
},
"value": "File:",
}
},
{
"widget": {
"debug": "on",
"image": {
"alignment": "center",
"hOffset": 250,
"name": "sun1",
"src": "Images/Sun.png",
"vOffset": 250,
},
"text": {
"alignment": "center",
"data": "Click Here",
"hOffset": 250,
"name": "text1",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;",
"size": 36,
"style": "bold",
"vOffset": 100,
},
"window": {
"height": 500,
"name": "main_window",
"title": "Sample Konfabulator Widget",
"width": 500,
},
}
},
{
"web-app": {
"servlet": [
{
"init-param": {
"cachePackageTagsRefresh": 60,
"cachePackageTagsStore": 200,
"cachePackageTagsTrack": 200,
"cachePagesDirtyRead": 10,
"cachePagesRefresh": 10,
"cachePagesStore": 100,
"cachePagesTrack": 200,
"cacheTemplatesRefresh": 15,
"cacheTemplatesStore": 50,
"cacheTemplatesTrack": 100,
"configGlossary:adminEmail": "ksm@pobox.com",
"configGlossary:installationAt": "Philadelphia, " "PA",
"configGlossary:poweredBy": "Cofax",
"configGlossary:poweredByIcon": "/images/cofax.gif",
"configGlossary:staticPath": "/content/static",
"dataStoreClass": "org.cofax.SqlDataStore",
"dataStoreConnUsageLimit": 100,
"dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver",
"dataStoreInitConns": 10,
"dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log",
"dataStoreLogLevel": "debug",
"dataStoreMaxConns": 100,
"dataStoreName": "cofax",
"dataStorePassword": "dataStoreTestQuery",
"dataStoreTestQuery": "SET NOCOUNT "
"ON;select "
"test='test';",
"dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon",
"dataStoreUser": "sa",
"defaultFileTemplate": "articleTemplate.htm",
"defaultListTemplate": "listTemplate.htm",
"jspFileTemplate": "articleTemplate.jsp",
"jspListTemplate": "listTemplate.jsp",
"maxUrlLength": 500,
"redirectionClass": "org.cofax.SqlRedirection",
"searchEngineFileTemplate": "forSearchEngines.htm",
"searchEngineListTemplate": "forSearchEnginesList.htm",
"searchEngineRobotsDb": "WEB-INF/robots.db",
"templateLoaderClass": "org.cofax.FilesTemplateLoader",
"templateOverridePath": "",
"templatePath": "templates",
"templateProcessorClass": "org.cofax.WysiwygTemplate",
"useDataStore": True,
"useJSP": False,
},
"servlet-class": "org.cofax.cds.CDSServlet",
"servlet-name": "cofaxCDS",
},
{
"init-param": {
"mailHost": "mail1",
"mailHostOverride": "mail2",
},
"servlet-class": "org.cofax.cds.EmailServlet",
"servlet-name": "cofaxEmail",
},
{
"servlet-class": "org.cofax.cds.AdminServlet",
"servlet-name": "cofaxAdmin",
},
{
"servlet-class": "org.cofax.cds.FileServlet",
"servlet-name": "fileServlet",
},
{
"init-param": {
"adminGroupID": 4,
"betaServer": True,
"dataLog": 1,
"dataLogLocation": "/usr/local/tomcat/logs/dataLog.log",
"dataLogMaxSize": "",
"fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder",
"log": 1,
"logLocation": "/usr/local/tomcat/logs/CofaxTools.log",
"logMaxSize": "",
"lookInContext": 1,
"removePageCache": "/content/admin/remove?cache=pages&id=",
"removeTemplateCache": "/content/admin/remove?cache=templates&id=",
"templatePath": "toolstemplates/",
},
"servlet-class": "org.cofax.cms.CofaxToolsServlet",
"servlet-name": "cofaxTools",
},
],
"servlet-mapping": {
"cofaxAdmin": "/admin/*",
"cofaxCDS": "/",
"cofaxEmail": "/cofaxutil/aemail/*",
"cofaxTools": "/tools/*",
"fileServlet": "/static/*",
},
"taglib": {
"taglib-location": "/WEB-INF/tlds/cofax.tld",
"taglib-uri": "cofax.tld",
},
}
},
{
"menu": {
"header": "SVG Viewer",
"items": [
{"id": "Open"},
{"id": "OpenNew", "label": "Open New"},
None,
{"id": "ZoomIn", "label": "Zoom In"},
{"id": "ZoomOut", "label": "Zoom Out"},
{"id": "OriginalView", "label": "Original View"},
None,
{"id": "Quality"},
{"id": "Pause"},
{"id": "Mute"},
None,
{"id": "Find", "label": "Find..."},
{"id": "FindAgain", "label": "Find Again"},
{"id": "Copy"},
{"id": "CopyAgain", "label": "Copy Again"},
{"id": "CopySVG", "label": "Copy SVG"},
{"id": "ViewSVG", "label": "View SVG"},
{"id": "ViewSource", "label": "View Source"},
{"id": "SaveAs", "label": "Save As"},
None,
{"id": "Help"},
{"id": "About", "label": "About Adobe CVG Viewer..."},
],
}
},
]
for t, exp_result in zip((test1, test2, test3, test4, test5), expected):
result = jsonObject.parse_string(t, parse_all=True)
self.assertEqual(exp_result, result[0])
def testParseCommaSeparatedValues(self):
testData = [
"a,b,c,100.2,,3",
"d, e, j k , m ",
"'Hello, World', f, g , , 5.1,x",
"John Doe, 123 Main St., Cleveland, Ohio",
"Jane Doe, 456 St. James St., Los Angeles , California ",
"",
]
testVals = [
[(3, "100.2"), (4, ""), (5, "3")],
[(2, "j k"), (3, "m")],
[(0, "'Hello, World'"), (2, "g"), (3, "")],
[(0, "John Doe"), (1, "123 Main St."), (2, "Cleveland"), (3, "Ohio")],
[
(0, "Jane Doe"),
(1, "456 St. James St."),
(2, "Los Angeles"),
(3, "California"),
],
]
for line, tests in zip(testData, testVals):
print(f"Parsing: {line!r} ->", end=" ")
results = ppc.comma_separated_list.parse_string(line, parse_all=True)
print(results)
for t in tests:
if not (len(results) > t[0] and results[t[0]] == t[1]):
print("$$$", results.dump())
print("$$$", results[0])
self.assertTrue(
len(results) > t[0] and results[t[0]] == t[1],
f"failed on {line}, item {t[0]:d} s/b '{t[1]}', got '{results.as_list()}'",
)
def testParseEBNF(self):
from examples import ebnf
print("Constructing EBNF parser with pyparsing...")
grammar = """
syntax = (syntax_rule), {(syntax_rule)};
syntax_rule = meta_identifier, '=', definitions_list, ';';
definitions_list = single_definition, {'|', single_definition};
single_definition = syntactic_term, {',', syntactic_term};
syntactic_term = syntactic_factor,['-', syntactic_factor];
syntactic_factor = [integer, '*'], syntactic_primary;
syntactic_primary = optional_sequence | repeated_sequence |
grouped_sequence | meta_identifier | terminal_string;
optional_sequence = '[', definitions_list, ']';
repeated_sequence = '{', definitions_list, '}';
grouped_sequence = '(', definitions_list, ')';
(*
terminal_string = "'", character - "'", {character - "'"}, "'" |
'"', character - '"', {character - '"'}, '"';
meta_identifier = letter, {letter | digit};
integer = digit, {digit};
*)
"""
table = {}
table["terminal_string"] = pp.quoted_string
table["meta_identifier"] = pp.Word(pp.alphas + "_", pp.alphas + "_" + pp.nums)
table["integer"] = pp.Word(pp.nums)
print("Parsing EBNF grammar with EBNF parser...")
parsers = ebnf.parse(grammar, table)
ebnf_parser = parsers["syntax"]
ebnf_comment = pp.Literal("(*") + ... + "*)"
ebnf_parser.ignore(ebnf_comment)
print("-", "\n- ".join(parsers.keys()))
self.assertEqual(
13, len(list(parsers.keys())), "failed to construct syntax grammar"
)
print("Parsing EBNF grammar with generated EBNF parser...")
parsed_chars = ebnf_parser.parse_string(grammar, parse_all=True)
parsed_char_len = len(parsed_chars)
print("],\n".join(str(parsed_chars.as_list()).split("],")))
self.assertEqual(
98,
len(flatten(parsed_chars.as_list())),
"failed to tokenize grammar correctly",
)
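# (Editorial note, hedged: ebnf.parse() returns a dict mapping rule names to
# generated parser elements; the `table` argument pre-binds terminals the
# grammar leaves undefined, here the three rules disabled with (* *)
# comments, which is why the generated parser still resolves them.)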
def testParseEBNFmissingDefinitions(self):
"""
Test detection of missing definitions in EBNF
"""
from examples import ebnf
grammar = """
(*
EBNF for number_words.py
*)
number = [thousands, [and]], [hundreds, [and]], [one_to_99];
"""
with self.assertRaisesRegex(
AssertionError,
r"Missing definitions for \['thousands', 'and', 'hundreds', 'one_to_99']"
):
ebnf.parse(grammar)
def testParseIDL(self):
from examples import idlParse
def test(strng, numToks, expectedErrloc=0):
print(strng)
try:
bnf = idlParse.CORBA_IDL_BNF()
tokens = bnf.parse_string(strng, parse_all=True)
print("tokens = ")
tokens.pprint()
tokens = flatten(tokens.as_list())
print(len(tokens))
self.assertEqual(
numToks,
len(tokens),
f"error matching IDL string, {strng} -> {tokens}",
)
except ParseException as err:
print(err.line)
print(f"{' ' * (err.column - 1)}^")
print(err)
self.assertEqual(
0,
numToks,
f"unexpected ParseException while parsing {strng}, {err}",
)
self.assertEqual(
expectedErrloc,
err.loc,
f"expected ParseException at {expectedErrloc}, found exception at {err.loc}",
)
test(
"""
/*
* a block comment *
*/
typedef string[10] tenStrings;
typedef sequence<string> stringSeq;
typedef sequence< sequence<string> > stringSeqSeq;
interface QoSAdmin {
stringSeq method1(in string arg1, inout long arg2);
stringSeqSeq method2(in string arg1, inout long arg2, inout long arg3);
string method3();
};
""",
59,
)
test(
"""
/*
* a block comment *
*/
typedef string[10] tenStrings;
typedef
/** ** *** **** *
* a block comment *
*/
sequence<string> /*comment inside an And */ stringSeq;
/* */ /**/ /***/ /****/
typedef sequence< sequence<string> > stringSeqSeq;
interface QoSAdmin {
stringSeq method1(in string arg1, inout long arg2);
stringSeqSeq method2(in string arg1, inout long arg2, inout long arg3);
string method3();
};
""",
59,
)
test(
r"""
const string test="Test String\n";
const long a = 0;
const long b = -100;
const float c = 3.14159;
const long d = 0x007f7f7f;
exception TestException
{
string msg;
sequence<string> dataStrings;
};
interface TestInterface
{
void method1(in string arg1, inout long arg2);
};
""",
60,
)
test(
"""
module Test1
{
exception TestException
{
string msg;
];
interface TestInterface
{
void method1(in string arg1, inout long arg2)
raises (TestException);
};
};
""",
0,
56,
)
test(
"""
module Test1
{
exception TestException
{
string msg;
};
};
""",
13,
)
def testParseVerilog(self):
pass
def testScanString(self):
testdata = """
<table border="0" cellpadding="3" cellspacing="3" frame="" width="90%">
<tr align="left" valign="top">
<td><b>Name</b></td>
<td><b>IP Address</b></td>
<td><b>Location</b></td>
</tr>
<tr align="left" valign="top" bgcolor="#c7efce">
<td>time-a.nist.gov</td>
<td>129.6.15.28</td>
<td>NIST, Gaithersburg, Maryland</td>
</tr>
<tr align="left" valign="top">
<td>time-b.nist.gov</td>
<td>129.6.15.29</td>
<td>NIST, Gaithersburg, Maryland</td>
</tr>
<tr align="left" valign="top" bgcolor="#c7efce">
<td>time-a.timefreq.bldrdoc.gov</td>
<td>132.163.4.101</td>
<td>NIST, Boulder, Colorado</td>
</tr>
<tr align="left" valign="top">
<td>time-b.timefreq.bldrdoc.gov</td>
<td>132.163.4.102</td>
<td>NIST, Boulder, Colorado</td>
</tr>
<tr align="left" valign="top" bgcolor="#c7efce">
<td>time-c.timefreq.bldrdoc.gov</td>
<td>132.163.4.103</td>
<td>NIST, Boulder, Colorado</td>
</tr>
</table>
"""
integer = pp.Word(pp.nums)
ipAddress = pp.Combine(integer + "." + integer + "." + integer + "." + integer)
tdStart = pp.Suppress("<td>")
tdEnd = pp.Suppress("</td>")
timeServerPattern = (
tdStart
+ ipAddress("ipAddr")
+ tdEnd
+ tdStart
+ pp.CharsNotIn("<")("loc")
+ tdEnd
)
servers = [
srvr.ipAddr
for srvr, startloc, endloc in timeServerPattern.scan_string(testdata)
]
print(servers)
self.assertEqual(
[
"129.6.15.28",
"129.6.15.29",
"132.163.4.101",
"132.163.4.102",
"132.163.4.103",
],
servers,
"failed scan_string()",
)
servers = [
srvr.ipAddr
for srvr, startloc, endloc in timeServerPattern.scan_string(testdata, max_matches=3)
]
self.assertEqual(
[
"129.6.15.28",
"129.6.15.29",
"132.163.4.101",
],
servers,
"failed scan_string() with max_matches=3",
)
# test for string_end detection in scan_string
foundStringEnds = [r for r in pp.StringEnd().scan_string("xyzzy")]
print(foundStringEnds)
self.assertTrue(foundStringEnds, "Failed to find StringEnd in scan_string")
def testQuotedStrings(self):
testData = """
'a valid single quoted string'
'an invalid single quoted string
because it spans lines'
"a valid double quoted string"
"an invalid double quoted string
because it spans lines"
"""
print(testData)
with self.subTest():
sglStrings = [
(t[0], b, e) for (t, b, e) in pp.sgl_quoted_string.scan_string(testData)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 47),
"single quoted string failure",
)
with self.subTest():
dblStrings = [
(t[0], b, e) for (t, b, e) in pp.dbl_quoted_string.scan_string(testData)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 154 and dblStrings[0][2] == 184),
"double quoted string failure",
)
with self.subTest():
allStrings = [
(t[0], b, e) for (t, b, e) in pp.quoted_string.scan_string(testData)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (allStrings[0][1] == 17 and allStrings[0][2] == 47)
and (allStrings[1][1] == 154 and allStrings[1][2] == 184),
"quoted string failure",
)
escapedQuoteTest = r"""
'This string has an escaped (\') quote character'
"This string has an escaped (\") quote character"
"""
with self.subTest():
sglStrings = [
(t[0], b, e)
for (t, b, e) in pp.sgl_quoted_string.scan_string(escapedQuoteTest)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 66),
f"single quoted string escaped quote failure ({sglStrings[0]})",
)
with self.subTest():
dblStrings = [
(t[0], b, e)
for (t, b, e) in pp.dbl_quoted_string.scan_string(escapedQuoteTest)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 83 and dblStrings[0][2] == 132),
f"double quoted string escaped quote failure ({dblStrings[0]})",
)
with self.subTest():
allStrings = [
(t[0], b, e)
for (t, b, e) in pp.quoted_string.scan_string(escapedQuoteTest)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (
allStrings[0][1] == 17
and allStrings[0][2] == 66
and allStrings[1][1] == 83
and allStrings[1][2] == 132
),
f"quoted string escaped quote failure ({[str(s[0]) for s in allStrings]})",
)
dblQuoteTest = r"""
'This string has an doubled ('') quote character'
"This string has an doubled ("") quote character"
"""
with self.subTest():
sglStrings = [
(t[0], b, e)
for (t, b, e) in pp.sgl_quoted_string.scan_string(dblQuoteTest)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 66),
f"single quoted string escaped quote failure ({sglStrings[0]})",
)
with self.subTest():
dblStrings = [
(t[0], b, e)
for (t, b, e) in pp.dbl_quoted_string.scan_string(dblQuoteTest)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 83 and dblStrings[0][2] == 132),
f"double quoted string escaped quote failure ({dblStrings[0]})",
)
with self.subTest():
allStrings = [
(t[0], b, e) for (t, b, e) in pp.quoted_string.scan_string(dblQuoteTest)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (
allStrings[0][1] == 17
and allStrings[0][2] == 66
and allStrings[1][1] == 83
and allStrings[1][2] == 132
),
f"quoted string escaped quote failure ({[str(s[0]) for s in allStrings]})",
)
# test invalid end_quote_char
with self.subTest():
with self.assertRaises(
ValueError, msg="issue raising error for invalid end_quote_char"
):
expr = pp.QuotedString('"', end_quote_char=" ")
with self.subTest():
source = """
'''
multiline quote with comment # this is a comment
'''
\"\"\"
multiline quote with comment # this is a comment
\"\"\"
"single line quote with comment # this is a comment"
'single line quote with comment # this is a comment'
"""
stripped = (
pp.python_style_comment.ignore(pp.python_quoted_string)
.suppress()
.transform_string(source)
)
self.assertEqual(source, stripped)
def testQuotedStringUnquotesAndConvertWhitespaceEscapes(self):
# test for Issue #474
# fmt: off
backslash = chr(92) # a single backslash
tab = "\t"
newline = "\n"
test_string_0 = f'"{backslash}{backslash}n"' # r"\\n"
test_string_1 = f'"{backslash}t{backslash}{backslash}n"' # r"\t\\n"
test_string_2 = f'"a{backslash}tb"' # r"a\tb"
test_string_3 = f'"{backslash}{backslash}{backslash}n"' # r"\\\n"
T, F = True, False # these make the test cases format nicely
for test_parameters in (
# Parameters are the arguments to creating a QuotedString
# and the expected parsed list of characters):
# - unquote_results
# - convert_whitespace_escapes
# - test string
# - expected parsed characters (broken out as separate
# list items (all those doubled backslashes make it
# difficult to interpret the output)
(T, T, test_string_0, [backslash, "n"]),
(T, F, test_string_0, [backslash, "n"]),
(F, F, test_string_0, ['"', backslash, backslash, "n", '"']),
(T, T, test_string_1, [tab, backslash, "n"]),
(T, F, test_string_1, ["t", backslash, "n"]),
(F, F, test_string_1, ['"', backslash, "t", backslash, backslash, "n", '"']),
(T, T, test_string_2, ["a", tab, "b"]),
(T, F, test_string_2, ["a", "t", "b"]),
(F, F, test_string_2, ['"', "a", backslash, "t", "b", '"']),
(T, T, test_string_3, [backslash, newline]),
(T, F, test_string_3, [backslash, "n"]),
(F, F, test_string_3, ['"', backslash, backslash, backslash, "n", '"']),
):
unquote_results, convert_ws_escapes, test_string, expected_list = test_parameters
test_description = f"Testing with parameters {test_parameters}"
with self.subTest(msg=test_description):
print(test_description)
print(f"unquote_results: {unquote_results}"
f"\nconvert_whitespace_escapes: {convert_ws_escapes}")
qs_expr = pp.QuotedString(
quote_char='"',
esc_char='\\',
unquote_results=unquote_results,
convert_whitespace_escapes=convert_ws_escapes
)
result = qs_expr.parse_string(test_string)
# do this instead of assertParserAndCheckList to explicitly
# check and display the separate items in the list
print("Results:")
control_chars = {newline: "<NEWLINE>", backslash: "<BACKSLASH>", tab: "<TAB>"}
print(f"[{', '.join(control_chars.get(c, repr(c)) for c in result[0])}]")
self.assertEqual(expected_list, list(result[0]))
print()
# fmt: on
def testPythonQuotedStrings(self):
# fmt: off
success1, _ = pp.python_quoted_string.run_tests([
'"""xyz"""',
'''"""xyz
"""''',
'"""xyz "" """',
'''"""xyz ""
"""''',
'"""xyz " """',
'''"""xyz "
"""''',
r'''"""xyz \"""
"""''',
"'''xyz'''",
"""'''xyz
'''""",
"'''xyz '' '''",
"""'''xyz ''
'''""",
"'''xyz ' '''",
"""'''xyz '
'''""",
r"""'''xyz \'''
'''""",
])
print("\n\nFailure tests")
success2, _ = pp.python_quoted_string.run_tests([
'"xyz"""',
], failure_tests=True)
self.assertTrue(success1 and success2, "Python quoted string matching failure")
# fmt: on
def testCaselessOneOf(self):
caseless1 = pp.one_of("d a b c aA B A C", caseless=True)
caseless1str = str(caseless1)
print(caseless1str)
caseless2 = pp.one_of("d a b c Aa B A C", caseless=True)
caseless2str = str(caseless2)
print(caseless2str)
self.assertEqual(
caseless1str.upper(),
caseless2str.upper(),
"one_of not handling caseless option properly",
)
self.assertNotEqual(
caseless1str, caseless2str, "caseless one_of failed to preserve original case of alternatives"
)
res = caseless1[...].parse_string("AAaaAaaA", parse_all=True)
print(res)
self.assertEqual(4, len(res), "caseless1 one_of failed")
self.assertEqual(
"aA" * 4, "".join(res), "caseless1 CaselessLiteral return failed"
)
res = caseless2[...].parse_string("AAaaAaaA", parse_all=True)
print(res)
self.assertEqual(4, len(res), "caseless2 one_of failed")
self.assertEqual(
"Aa" * 4, "".join(res), "caseless1 CaselessLiteral return failed"
)
def testCStyleCommentParser(self):
print("verify processing of C-style /* */ comments")
testdata = f"""
/* */
/** **/
/**/
/*{'*' * 1_000_000}*/
/****/
/* /*/
/** /*/
/*** /*/
/*
ablsjdflj
*/
"""
for test_expr in (pp.c_style_comment, pp.cpp_style_comment, pp.java_style_comment):
with self.subTest("parse test - /* */ comments", test_expr=test_expr):
found_matches = [
len(t[0]) for t, s, e in test_expr.scan_string(testdata)
]
self.assertEqual(
[5, 7, 4, 1000004, 6, 6, 7, 8, 33],
found_matches,
f"only found {test_expr} lengths {found_matches}",
)
found_lines = [
pp.lineno(s, testdata) for t, s, e in test_expr.scan_string(testdata)
]
self.assertEqual(
[2, 3, 4, 5, 6, 7, 8, 9, 10],
found_lines,
f"only found {test_expr} on lines {found_lines}",
)
def testHtmlCommentParser(self):
print("verify processing of HTML comments")
test_expr = pp.html_comment
testdata = """
<!-- -->
<!--- --->
<!---->
<!----->
<!------>
<!-- /-->
<!--- /-->
<!---- /-->
<!---- /- ->
<!---- / -- >
<!--
ablsjdflj
-->
"""
found_matches = [
len(t[0]) for t, s, e in test_expr.scan_string(testdata)
]
self.assertEqual(
[8, 10, 7, 8, 9, 9, 10, 11, 79],
found_matches,
f"only found {test_expr} lengths {found_matches}",
)
found_lines = [
pp.lineno(s, testdata) for t, s, e in pp.html_comment.scan_string(testdata)
]
self.assertEqual(
[2, 3, 4, 5, 6, 7, 8, 9, 10],
found_lines,
f"only found HTML comments on lines {found_lines}",
)
def testDoubleSlashCommentParser(self):
print("verify processing of C++ and Java comments - // comments")
# test C++ single line comments that have line terminated with '\' (should continue comment to following line)
testdata = r"""
// comment1
// comment2 \
still comment 2
// comment 3
"""
for test_expr in (pp.dbl_slash_comment, pp.cpp_style_comment, pp.java_style_comment):
with self.subTest("parse test - // comments", test_expr=test_expr):
found_matches = [
len(t[0]) for t, s, e in test_expr.scan_string(testdata)
]
self.assertEqual(
[11, 41, 12],
found_matches,
f"only found {test_expr} lengths {found_matches}",
)
found_lines = [
pp.lineno(s, testdata) for t, s, e in test_expr.scan_string(testdata)
]
self.assertEqual(
[2, 3, 5],
found_lines,
f"only found {test_expr} on lines {found_lines}",
)
def testReCatastrophicBacktrackingInQuotedStringParsers(self):
# reported by webpentest - 2016-04-28
print(
"testing catastrophic RE backtracking in implementation of quoted string parsers"
)
repeat = 5000
for expr, test_string in [
(pp.dblQuotedString, '"' + "\xff" * repeat),
(pp.sglQuotedString, "'" + "\xff" * repeat),
(pp.quotedString, '"' + "\xff" * repeat),
(pp.quotedString, "'" + "\xff" * repeat),
(pp.QuotedString('"'), '"' + "\xff" * repeat),
(pp.QuotedString("'"), "'" + "\xff" * repeat),
]:
test_string_label = f"{test_string[:2]}..."
with self.subTest(expr=expr, test_string=repr(test_string_label)):
# parse a valid quoted string
expr.parse_string(test_string + test_string[0], parse_all=True)
# try to parse a quoted string with no trailing quote
with self.assertRaisesParseException():
expr.parse_string(test_string, parse_all=True)
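# (Editorial note, hedged: the "\xff" * 5000 inputs are pathological for a
# naively written quoted-string regex such as r'"(?:\\.|[^"])*"', whose
# ambiguous alternation can backtrack exponentially on an unterminated
# body; pyparsing's quoted-string patterns are written to fail fast instead.)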
def testReCatastrophicBacktrackingInCommentParsers(self):
print(
"testing catastrophic RE backtracking in implementation of comment parsers"
)
for expr, test_string in [
(pp.c_style_comment, f"/*{'*' * 500}"),
(pp.cpp_style_comment, f"/*{'*' * 500}"),
(pp.java_style_comment, f"/*{'*' * 500}"),
(pp.html_comment, f"<!-- {'-' * 500}")
]:
with self.subTest("Test catastrophic RE backtracking", expr=expr):
try:
expr.parse_string(test_string)
except pp.ParseException:
continue
def testParseExpressionResults(self):
a = pp.Word("a", pp.alphas).set_name("A")
b = pp.Word("b", pp.alphas).set_name("B")
c = pp.Word("c", pp.alphas).set_name("C")
ab = (a + b).set_name("AB")
abc = (ab + c).set_name("ABC")
word = pp.Word(pp.alphas).set_name("word")
words = pp.Group(pp.OneOrMore(~a + word)).set_name("words")
phrase = (
words("Head")
+ pp.Group(a + pp.Optional(b + pp.Optional(c)))("ABC")
+ words("Tail")
)
results = phrase.parse_string(
"xavier yeti alpha beta charlie will beaver", parse_all=True
)
print(results, results.Head, results.ABC, results.Tail)
for key, ln in [("Head", 2), ("ABC", 3), ("Tail", 2)]:
self.assertEqual(
ln,
len(results[key]),
f"expected {ln:d} elements in {key}, found {results[key]}",
)
def testParseKeyword(self):
kw = pp.Keyword("if")
lit = pp.Literal("if")
def test(s, litShouldPass, kwShouldPass):
print("Test", s)
print("Match Literal", end=" ")
try:
print(lit.parse_string(s, parse_all=False))
except Exception:
print("failed")
if litShouldPass:
self.fail(f"Literal failed to match {s}, should have")
else:
if not litShouldPass:
self.fail(f"Literal matched {s}, should not have")
print("Match Keyword", end=" ")
try:
print(kw.parse_string(s, parse_all=False))
except Exception:
print("failed")
if kwShouldPass:
self.fail(f"Keyword failed to match {s}, should have")
else:
if not kwShouldPass:
self.fail(f"Keyword matched {s}, should not have")
test("ifOnlyIfOnly", True, False)
test("if(OnlyIfOnly)", True, True)
test("if (OnlyIf Only)", True, True)
kw = pp.Keyword("if", caseless=True)
test("IFOnlyIfOnly", False, False)
test("If(OnlyIfOnly)", False, True)
test("iF (OnlyIf Only)", False, True)
with self.assertRaises(
ValueError, msg="failed to warn empty string passed to Keyword"
):
kw = pp.Keyword("")
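# (Editorial sketch, hedged: the distinction exercised above is that
# Literal("if") happily matches the prefix of "ifOnlyIfOnly", while
# Keyword("if") additionally requires that the next character not be a
# valid keyword character, so it matches "if" only as a standalone word.)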
def testParseExpressionResultsAccumulate(self):
num = pp.Word(pp.nums).set_name("num")("base10*")
hexnum = pp.Combine("0x" + pp.Word(pp.nums)).set_name("hexnum")("hex*")
name = pp.Word(pp.alphas).set_name("word")("word*")
list_of_num = pp.DelimitedList(hexnum | num | name, ",")
tokens = list_of_num.parse_string("1, 0x2, 3, 0x4, aaa", parse_all=True)
print(tokens.dump())
self.assertParseResultsEquals(
tokens,
expected_list=["1", "0x2", "3", "0x4", "aaa"],
expected_dict={
"base10": ["1", "3"],
"hex": ["0x2", "0x4"],
"word": ["aaa"],
},
)
lbrack = pp.Literal("(").suppress()
rbrack = pp.Literal(")").suppress()
integer = pp.Word(pp.nums).set_name("int")
variable = pp.Word(pp.alphas, max=1).set_name("variable")
relation_body_item = (
variable | integer | pp.quoted_string().set_parse_action(pp.remove_quotes)
)
relation_name = pp.Word(pp.alphas + "_", pp.alphanums + "_")
relation_body = lbrack + pp.Group(pp.DelimitedList(relation_body_item)) + rbrack
Goal = pp.Dict(pp.Group(relation_name + relation_body))
Comparison_Predicate = pp.Group(variable + pp.one_of("< >") + integer)("pred*")
Query = Goal("head") + ":-" + pp.DelimitedList(Goal | Comparison_Predicate)
test = """Q(x,y,z):-Bloo(x,"Mitsis",y),Foo(y,z,1243),y>28,x<12,x>3"""
queryRes = Query.parse_string(test, parse_all=True)
print(queryRes.dump())
self.assertParseResultsEquals(
queryRes.pred,
expected_list=[["y", ">", "28"], ["x", "<", "12"], ["x", ">", "3"]],
msg=f"Incorrect list for attribute pred, {queryRes.pred.as_list()}",
)
def testReStringRange(self):
testCases = (
r"[A-Z]",
r"[A-A]",
r"[A-Za-z]",
r"[A-z]",
r"[\ -\~]",
r"[\0x20-0]",
r"[\0x21-\0x7E]",
r"[\0xa1-\0xfe]",
r"[\040-0]",
r"[A-Za-z0-9]",
r"[A-Za-z0-9_]",
r"[A-Za-z0-9_$]",
r"[A-Za-z0-9_$\-]",
r"[^0-9\\]",
r"[a-zA-Z]",
r"[/\^~]",
r"[=\+\-!]",
r"[A-]",
r"[-A]",
r"[\x21]",
r"[а-яА-ЯёЁA-Z$_\041α-ω]",
r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]",
r"[\0xa1-\0xbf\0xd7\0xf7]",
r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]",
r"[\0xa1-\0xbf\0xd7\0xf7]",
r"[\\[\]\/\-\*\.\$\+\^\?()~ ]",
)
expectedResults = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"A",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz",
" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~",
" !\"#$%&'()*+,-./0",
"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~",
"¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþ",
" !\"#$%&'()*+,-./0",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$-",
"0123456789\\",
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
"/^~",
"=+-!",
"A-",
"-A",
"!",
"абвгдежзийклмнопрстуфхцчшщъыьэюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯёЁABCDEFGHIJKLMNOPQRSTUVWXYZ$_!αβγδεζηθικλμνξοπρςστυφχψω",
"ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ",
"¡¢£¤¥¦§¨©ª«¬\xad®¯°±²³´µ¶·¸¹º»¼½¾¿×÷",
pp.alphas8bit,
pp.punc8bit,
r"\[]/-*.$+^?()~ ",
)
for test in zip(testCases, expectedResults):
t, exp = test
res = pp.srange(t)
# print(t, "->", res)
self.assertEqual(
exp,
res,
f"srange error, srange({t!r})->'{res!r}', expected '{exp!r}'",
)
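# (Editorial sketch, hedged: srange() expands regex-style character classes
# into explicit strings for use with Word and friends, e.g.
# pp.srange(r"[a-d]") -> "abcd" and pp.srange(r"[0-9a-f]") -> "0123456789abcdef".)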
def testSkipToParserTests(self):
thingToFind = pp.Literal("working")
testExpr = (
pp.SkipTo(pp.Literal(";"), include=True, ignore=pp.c_style_comment)
+ thingToFind
)
def test_parse(someText):
print(testExpr.parse_string(someText, parse_all=True))
# This first test works, as the SkipTo expression is immediately following the ignore expression (c_style_comment)
test_parse("some text /* comment with ; in */; working")
# This second test previously failed, as there is text following the ignore expression, and before the SkipTo expression.
test_parse("some text /* comment with ; in */some other stuff; working")
# tests for optional fail_on argument
testExpr = (
pp.SkipTo(
pp.Literal(";"), include=True, ignore=pp.c_style_comment, fail_on="other"
)
+ thingToFind
)
test_parse("some text /* comment with ; in */; working")
with self.assertRaisesParseException():
test_parse("some text /* comment with ; in */some other stuff; working")
# test that we correctly create named results
text = "prefixDATAsuffix"
data = pp.Literal("DATA")
suffix = pp.Literal("suffix")
expr = pp.SkipTo(data + suffix)("prefix") + data + suffix
result = expr.parse_string(text, parse_all=True)
self.assertTrue(
isinstance(result.prefix, str),
"SkipTo created with wrong saveAsList attribute",
)
alpha_word = (~pp.Literal("end") + pp.Word(pp.alphas, as_keyword=True)).set_name(
"alpha"
)
num_word = pp.Word(pp.nums, as_keyword=True).set_name("int")
def test(expr, test_string, expected_list, expected_dict):
if (expected_list, expected_dict) == (None, None):
with self.assertRaises(
Exception, msg=f"{expr} failed to parse {test_string!r}"
):
expr.parse_string(test_string, parse_all=True)
else:
result = expr.parse_string(test_string, parse_all=True)
self.assertParseResultsEquals(
result, expected_list=expected_list, expected_dict=expected_dict
)
# ellipses for SkipTo
e = ... + pp.Literal("end")
test(e, "start 123 end", ["start 123 ", "end"], {"_skipped": ["start 123 "]})
e = pp.Suppress(...) + pp.Literal("end")
test(e, "start 123 end", ["end"], {})
e = pp.Literal("start") + ... + pp.Literal("end")
test(e, "start 123 end", ["start", "123 ", "end"], {"_skipped": ["123 "]})
e = ... + pp.Literal("middle") + ... + pp.Literal("end")
test(
e,
"start 123 middle 456 end",
["start 123 ", "middle", "456 ", "end"],
{"_skipped": ["start 123 ", "456 "]},
)
e = pp.Suppress(...) + pp.Literal("middle") + ... + pp.Literal("end")
test(
e,
"start 123 middle 456 end",
["middle", "456 ", "end"],
{"_skipped": ["456 "]},
)
e = pp.Literal("start") + ...
test(e, "start 123 end", None, None)
e = pp.And(["start", ..., "end"])
test(e, "start 123 end", ["start", "123 ", "end"], {"_skipped": ["123 "]})
e = pp.And([..., "end"])
test(e, "start 123 end", ["start 123 ", "end"], {"_skipped": ["start 123 "]})
e = "start" + (num_word | ...) + "end"
test(e, "start 456 end", ["start", "456", "end"], {})
test(
e,
"start 123 456 end",
["start", "123", "456 ", "end"],
{"_skipped": ["456 "]},
)
test(e, "start end", ["start", "", "end"], {"_skipped": ["missing <int>"]})
# e = define_expr('"start" + (num_word | ...)("inner") + "end"')
# test(e, "start 456 end", ['start', '456', 'end'], {'inner': '456'})
e = "start" + (alpha_word[...] & num_word[...] | ...) + "end"
test(e, "start 456 red end", ["start", "456", "red", "end"], {})
test(e, "start red 456 end", ["start", "red", "456", "end"], {})
test(
e,
"start 456 red + end",
["start", "456", "red", "+ ", "end"],
{"_skipped": ["+ "]},
)
test(e, "start red end", ["start", "red", "end"], {})
test(e, "start 456 end", ["start", "456", "end"], {})
test(e, "start end", ["start", "end"], {})
test(e, "start 456 + end", ["start", "456", "+ ", "end"], {"_skipped": ["+ "]})
e = "start" + (alpha_word[1, ...] & num_word[1, ...] | ...) + "end"
test(e, "start 456 red end", ["start", "456", "red", "end"], {})
test(e, "start red 456 end", ["start", "red", "456", "end"], {})
test(
e,
"start 456 red + end",
["start", "456", "red", "+ ", "end"],
{"_skipped": ["+ "]},
)
test(e, "start red end", ["start", "red ", "end"], {"_skipped": ["red "]})
test(e, "start 456 end", ["start", "456 ", "end"], {"_skipped": ["456 "]})
test(
e,
"start end",
["start", "", "end"],
{"_skipped": ["missing <{{alpha}... & {int}...}>"]},
)
test(e, "start 456 + end", ["start", "456 + ", "end"], {"_skipped": ["456 + "]})
e = "start" + (alpha_word | ...) + (num_word | ...) + "end"
test(e, "start red 456 end", ["start", "red", "456", "end"], {})
test(
e,
"start red end",
["start", "red", "", "end"],
{"_skipped": ["missing <int>"]},
)
test(
e,
"start 456 end",
["start", "", "456", "end"],
{"_skipped": ["missing <alpha>"]},
)
test(
e,
"start end",
["start", "", "", "end"],
{"_skipped": ["missing <alpha>", "missing <int>"]},
)
e = pp.Literal("start") + ... + "+" + ... + "end"
test(
e,
"start red + 456 end",
["start", "red ", "+", "456 ", "end"],
{"_skipped": ["red ", "456 "]},
)
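# (Editorial note, hedged: a bare ... inside a sequence is shorthand for
# SkipTo(<next expression>); skipped text is kept under the "_skipped"
# results name unless wrapped in pp.Suppress(...), and a trailing ... with
# no following expression fails to parse, as the cases above enumerate.)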
def testSkipToPreParseIgnoreExprs(self):
# added to verify fix to Issue #475
from pyparsing import Word, alphanums, python_style_comment
some_grammar = Word(alphanums) + ":=" + ... + ";"
some_grammar.ignore(python_style_comment)
try:
result = some_grammar.parse_string(
"""\
var1 := 2 # 3; <== before the Issue #475 fix, this semi-colon would match!
+ 1;
""",
parse_all=True,
)
except ParseException as pe:
print(pe.explain())
raise
else:
print(result.dump())
def testSkipToIgnoreExpr2(self):
a, star = pp.Literal.using_each("a*")
wrapper = a + ... + a
expr = star + pp.SkipTo(star, ignore=wrapper) + star
# pyparsing 3.0.9 -> ['*', 'a_*_a', '*']
# pyparsing 3.1.0 -> ['*', '', '*']
self.assertParseAndCheckList(expr, "*a_*_a*", ["*", "a_*_a", "*"])
def testEllipsisRepetition(self):
word = pp.Word(pp.alphas).set_name("word")
num = pp.Word(pp.nums).set_name("num")
exprs = [
word[...] + num,
word * ... + num,
word[0, ...] + num,
word[1, ...] + num,
word[2, ...] + num,
word[..., 3] + num,
word[2] + num,
]
expected_res = [
r"([abcd]+ )*\d+",
r"([abcd]+ )*\d+",
r"([abcd]+ )*\d+",
r"([abcd]+ )+\d+",
r"([abcd]+ ){2,}\d+",
r"([abcd]+ ){0,3}\d+",
r"([abcd]+ ){2}\d+",
]
tests = ["aa bb cc dd 123", "bb cc dd 123", "cc dd 123", "dd 123", "123"]
all_success = True
for expr, expected_re in zip(exprs, expected_res):
successful_tests = [t for t in tests if re.match(expected_re, t)]
failure_tests = [t for t in tests if not re.match(expected_re, t)]
success1, _ = expr.run_tests(successful_tests)
success2, _ = expr.run_tests(failure_tests, failure_tests=True)
all_success = all_success and success1 and success2
if not all_success:
print("Failed expression:", expr)
break
self.assertTrue(all_success, "failed getItem_ellipsis test")
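# (Editorial note, hedged: the expr/regex table above encodes the repetition
# shorthand semantics: word[...] and word * ... mean zero-or-more,
# word[1, ...] one-or-more, word[2, ...] at-least-two, word[..., 3] at most
# three, and word[2] exactly two.)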
def testEllipsisRepetitionWithResultsNames(self):
label = pp.Word(pp.alphas)
val = ppc.integer()
parser = label("label") + pp.ZeroOrMore(val)("values")
_, results = parser.run_tests(
"""
a 1
b 1 2 3
c
"""
)
expected = [
(["a", 1], {"label": "a", "values": [1]}),
(["b", 1, 2, 3], {"label": "b", "values": [1, 2, 3]}),
(["c"], {"label": "c", "values": []}),
]
for obs, exp in zip(results, expected):
test, result = obs
exp_list, exp_dict = exp
self.assertParseResultsEquals(
result, expected_list=exp_list, expected_dict=exp_dict
)
parser = label("label") + val[...]("values")
_, results = parser.run_tests(
"""
a 1
b 1 2 3
c
"""
)
expected = [
(["a", 1], {"label": "a", "values": [1]}),
(["b", 1, 2, 3], {"label": "b", "values": [1, 2, 3]}),
(["c"], {"label": "c", "values": []}),
]
for obs, exp in zip(results, expected):
test, result = obs
exp_list, exp_dict = exp
self.assertParseResultsEquals(
result, expected_list=exp_list, expected_dict=exp_dict
)
pt = pp.Group(val("x") + pp.Suppress(",") + val("y"))
parser = label("label") + pt[...]("points")
_, results = parser.run_tests(
"""
a 1,1
b 1,1 2,2 3,3
c
"""
)
expected = [
(["a", [1, 1]], {"label": "a", "points": [{"x": 1, "y": 1}]}),
(
["b", [1, 1], [2, 2], [3, 3]],
{
"label": "b",
"points": [{"x": 1, "y": 1}, {"x": 2, "y": 2}, {"x": 3, "y": 3}],
},
),
(["c"], {"label": "c", "points": []}),
]
for obs, exp in zip(results, expected):
test, result = obs
exp_list, exp_dict = exp
self.assertParseResultsEquals(
result, expected_list=exp_list, expected_dict=exp_dict
)
def testCustomQuotes(self):
testString = r"""
sdlfjs :sdf\:jls::djf: sl:kfsjf
sdlfjs -sdf\:jls::--djf: sl-kfsjf
sdlfjs -sdf\:::jls::--djf: sl:::-kfsjf
sdlfjs ^sdf\:jls^^--djf^ sl-kfsjf
sdlfjs ^^^==sdf\:j=lz::--djf: sl=^^=kfsjf
sdlfjs ==sdf\:j=ls::--djf: sl==kfsjf^^^
"""
print(testString)
colonQuotes = pp.QuotedString(":", "\\", "::")
dashQuotes = pp.QuotedString("-", "\\", "--")
hatQuotes = pp.QuotedString("^", "\\")
hatQuotes1 = pp.QuotedString("^", "\\", "^^")
dblEqQuotes = pp.QuotedString("==", "\\")
def test(label, quoteExpr, expected):
print(label)
print(quoteExpr.pattern)
print(quoteExpr.search_string(testString))
print(quoteExpr.search_string(testString)[0][0])
print(f"{expected}")
self.assertEqual(
expected,
quoteExpr.search_string(testString)[0][0],
f"failed to match {quoteExpr}, expected '{expected}', got '{quoteExpr.search_string(testString)[0]}'",
)
print()
test("colonQuotes", colonQuotes, r"sdf:jls:djf")
test("dashQuotes", dashQuotes, r"sdf:jls::-djf: sl")
test("hatQuotes", hatQuotes, r"sdf:jls")
test("hatQuotes1", hatQuotes1, r"sdf:jls^--djf")
test("dblEqQuotes", dblEqQuotes, r"sdf:j=ls::--djf: sl")
test("::: quotes", pp.QuotedString(":::"), "jls::--djf: sl")
test("==-- quotes", pp.QuotedString("==", end_quote_char="--"), r"sdf\:j=lz::")
test(
"^^^ multiline quotes",
pp.QuotedString("^^^", multiline=True),
r"""==sdf\:j=lz::--djf: sl=^^=kfsjf
sdlfjs ==sdf\:j=ls::--djf: sl==kfsjf""",
)
with self.assertRaises(ValueError):
pp.QuotedString("", "\\")
def testCustomQuotes2(self):
qs = pp.QuotedString(quote_char=".[", end_quote_char="].")
print(qs.reString)
self.assertParseAndCheckList(qs, ".[...].", ["..."])
self.assertParseAndCheckList(qs, ".[].", [""])
self.assertParseAndCheckList(qs, ".[]].", ["]"])
self.assertParseAndCheckList(qs, ".[]]].", ["]]"])
qs = pp.QuotedString(quote_char="+*", end_quote_char="*+")
print(qs.reString)
self.assertParseAndCheckList(qs, "+*...*+", ["..."])
self.assertParseAndCheckList(qs, "+**+", [""])
self.assertParseAndCheckList(qs, "+***+", ["*"])
self.assertParseAndCheckList(qs, "+****+", ["**"])
qs = pp.QuotedString(quote_char="*/", end_quote_char="/*")
print(qs.reString)
self.assertParseAndCheckList(qs, "*/.../*", ["..."])
self.assertParseAndCheckList(qs, "*//*", [""])
self.assertParseAndCheckList(qs, "*///*", ["/"])
self.assertParseAndCheckList(qs, "*////*", ["//"])
def testRepeater(self):
if ParserElement._packratEnabled or ParserElement._left_recursion_enabled:
print("skipping this test, not compatible with memoization")
return
first = pp.Word("abcdef").set_name("word1")
bridge = pp.Word(pp.nums).set_name("number")
second = pp.match_previous_literal(first).set_name("repeat(word1Literal)")
seq = first + bridge + second
tests = [
("abc12abc", True),
("abc12aabc", False),
("abc12cba", True),
("abc12bca", True),
]
for tst, expected in tests:
found = False
for tokens, start, end in seq.scan_string(tst):
f, b, s = tokens
print(f, b, s)
found = True
if not found:
print("No literal match in", tst)
self.assertEqual(
expected,
found,
f"Failed repeater for test: {tst}, matching {seq}",
)
print()
# retest using match_previous_expr instead of match_previous_literal
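        # match_previous_literal re-matches the literal text of the previous
        # match, while match_previous_expr re-parses with the same expression
        # and requires the new results to equal the earlier ones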
second = pp.match_previous_expr(first).set_name("repeat(word1expr)")
seq = first + bridge + second
tests = [("abc12abc", True), ("abc12cba", False), ("abc12abcdef", False)]
for tst, expected in tests:
found = False
for tokens, start, end in seq.scan_string(tst):
print(tokens)
found = True
if not found:
print("No expression match in", tst)
self.assertEqual(
expected,
found,
f"Failed repeater for test: {tst}, matching {seq}",
)
print()
first = pp.Word("abcdef").set_name("word1")
bridge = pp.Word(pp.nums).set_name("number")
second = pp.match_previous_expr(first).set_name("repeat(word1)")
seq = first + bridge + second
csFirst = seq.set_name("word-num-word")
csSecond = pp.match_previous_expr(csFirst)
compoundSeq = csFirst + ":" + csSecond
compoundSeq.streamline()
print(compoundSeq)
tests = [
("abc12abc:abc12abc", True),
("abc12cba:abc12abc", False),
("abc12abc:abc12abcdef", False),
]
for tst, expected in tests:
found = False
for tokens, start, end in compoundSeq.scan_string(tst):
print("match:", tokens)
found = True
break
if not found:
print("No expression match in", tst)
self.assertEqual(
expected,
found,
f"Failed repeater for test: {tst}, matching {seq}",
)
print()
eFirst = pp.Word(pp.nums)
eSecond = pp.match_previous_expr(eFirst)
eSeq = eFirst + ":" + eSecond
tests = [("1:1A", True), ("1:10", False)]
for tst, expected in tests:
found = False
for tokens, start, end in eSeq.scan_string(tst):
print(tokens)
found = True
if not found:
print("No match in", tst)
self.assertEqual(
expected,
found,
f"Failed repeater for test: {tst}, matching {seq}",
)
def testRepeater2(self):
"""test match_previous_literal with empty repeater"""
if ParserElement._packratEnabled or ParserElement._left_recursion_enabled:
print("skipping this test, not compatible with memoization")
return
first = pp.Optional(pp.Word("abcdef").set_name("words1"))
bridge = pp.Word(pp.nums).set_name("number")
second = pp.match_previous_literal(first).set_name("repeat(word1Literal)")
seq = first + bridge + second
tst = "12"
expected = ["12"]
result = seq.parse_string(tst, parse_all=True)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater3(self):
"""test match_previous_literal with multiple repeater tokens"""
if ParserElement._packratEnabled or ParserElement._left_recursion_enabled:
print("skipping this test, not compatible with memoization")
return
first = pp.Word("a") + pp.Word("d")
bridge = pp.Word(pp.nums).set_name("number")
second = pp.match_previous_literal(first) # ("second")
seq = first + bridge + second
tst = "aaaddd12aaaddd"
expected = ["aaa", "ddd", "12", "aaa", "ddd"]
result = seq.parse_string(tst, parse_all=True)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater4(self):
"""test match_previous_expr with multiple repeater tokens"""
if ParserElement._packratEnabled or ParserElement._left_recursion_enabled:
print("skipping this test, not compatible with memoization")
return
first = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.alphas))
bridge = pp.Word(pp.nums)
# no matching is used - this is just here for a sanity check
# second = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.alphas))("second")
# second = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.alphas)).set_results_name("second")
# ISSUE: when match_previous_expr returns multiple tokens the matching tokens are nested an extra level deep.
# This behavior is not seen with a single return token (see testRepeater5 directly below.)
second = pp.match_previous_expr(first)
expr = first + bridge.suppress() + second
tst = "aaa ddd 12 aaa ddd"
expected = [["aaa", "ddd"], ["aaa", "ddd"]]
result = expr.parse_string(tst, parse_all=True)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater5(self):
"""a simplified testRepeater4 to examine match_previous_expr with a single repeater token"""
if ParserElement._packratEnabled or ParserElement._left_recursion_enabled:
print("skipping this test, not compatible with memoization")
return
first = pp.Word(pp.alphas)
bridge = pp.Word(pp.nums)
second = pp.match_previous_expr(first)
expr = first + bridge.suppress() + second
tst = "aaa 12 aaa"
expected = tst.replace("12", "").split()
result = expr.parse_string(tst, parse_all=True)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRecursiveCombine(self):
testInput = "myc(114)r(11)dd"
stream = pp.Forward()
stream <<= pp.Optional(pp.Word(pp.alphas)) + pp.Optional(
"(" + pp.Word(pp.nums) + ")" + stream
)
expected = ["".join(stream.parse_string(testInput, parse_all=True))]
print(expected)
stream = pp.Forward()
stream << pp.Combine(
pp.Optional(pp.Word(pp.alphas))
+ pp.Optional("(" + pp.Word(pp.nums) + ")" + stream)
)
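        # Combine concatenates all tokens matched by its contained expression
        # into a single string token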
testVal = stream.parse_string(testInput, parse_all=True)
print(testVal)
self.assertParseResultsEquals(testVal, expected_list=expected)
def testSetNameToStrAndNone(self):
wd = pp.Word(pp.alphas)
with self.subTest():
self.assertEqual("W:(A-Za-z)", wd.name)
with self.subTest():
wd.set_name("test_word")
self.assertEqual("test_word", wd.name)
with self.subTest():
wd.set_name(None)
self.assertEqual("W:(A-Za-z)", wd.name)
# same tests but using name property setter
with self.subTest():
wd.name = "test_word"
self.assertEqual("test_word", wd.name)
with self.subTest():
wd.name = None
self.assertEqual("W:(A-Za-z)", wd.name)
def testCombineSetName(self):
ab = pp.Combine(
pp.Literal("a").set_name("AAA") | pp.Literal("b").set_name("BBB")
).set_name("AB")
self.assertEqual("AB", ab.name)
self.assertEqual("AB", str(ab))
with self.assertRaisesParseException(expected_msg="Expected AB"):
ab.parse_string("C")
def testHTMLEntities(self):
html_source = dedent(
"""\
This & that
2 > 1
0 < 1
Don't get excited!
I said "Don't get excited!"
Copyright © 2021
Dot ⟶ ˙
"""
)
transformer = pp.common_html_entity().add_parse_action(pp.replace_html_entity)
transformed = transformer.transform_string(html_source)
print(transformed)
expected = dedent(
"""\
This & that
2 > 1
0 < 1
Don't get excited!
I said "Don't get excited!"
Copyright © 2021
Dot ⟶ ˙
"""
)
self.assertEqual(expected, transformed)
def testInfixNotationBasicArithEval(self):
import ast
integer = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
variable = pp.Word(pp.alphas, exact=1)
operand = integer | variable
expop = pp.Literal("^")
signop = pp.one_of("+ -")
multop = pp.one_of("* /")
plusop = pp.one_of("+ -")
factop = pp.Literal("!")
# fmt: off
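        # operator table entries are (op_expr, arity, associativity), listed
        # from highest to lowest precedence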
expr = pp.infix_notation(
operand,
[
(factop, 1, pp.OpAssoc.LEFT),
(expop, 2, pp.OpAssoc.RIGHT),
(signop, 1, pp.OpAssoc.RIGHT),
(multop, 2, pp.OpAssoc.LEFT),
(plusop, 2, pp.OpAssoc.LEFT),
],
)
# fmt: on
test = [
"9 + 2 + 3",
"9 + 2 * 3",
"(9 + 2) * 3",
"(9 + -2) * 3",
"(9 + --2) * 3",
"(9 + -2) * 3^2^2",
"(9! + -2) * 3^2^2",
"M*X + B",
"M*(X + B)",
"1+2*-3^4*5+-+-6",
"3!!",
]
expected = """[[9, '+', 2, '+', 3]]
[[9, '+', [2, '*', 3]]]
[[[9, '+', 2], '*', 3]]
[[[9, '+', ['-', 2]], '*', 3]]
[[[9, '+', ['-', ['-', 2]]], '*', 3]]
[[[9, '+', ['-', 2]], '*', [3, '^', [2, '^', 2]]]]
[[[[9, '!'], '+', ['-', 2]], '*', [3, '^', [2, '^', 2]]]]
[[['M', '*', 'X'], '+', 'B']]
[['M', '*', ['X', '+', 'B']]]
[[1, '+', [2, '*', ['-', [3, '^', 4]], '*', 5], '+', ['-', ['+', ['-', 6]]]]]
[[3, '!', '!']]""".split(
"\n"
)
expected = [ast.literal_eval(x.strip()) for x in expected]
for test_str, exp_list in zip(test, expected):
self.assertParseAndCheckList(expr, test_str, exp_list, verbose=True)
def testInfixNotationEvalBoolExprUsingAstClasses(self):
boolVars = {"True": True, "False": False}
class BoolOperand:
reprsymbol = ""
def __init__(self, t):
self.args = t[0][0::2]
def __str__(self):
sep = f" {self.reprsymbol} "
return f"({sep.join(map(str, self.args))})"
class BoolAnd(BoolOperand):
reprsymbol = "&"
def __bool__(self):
for a in self.args:
if isinstance(a, str):
v = boolVars[a]
else:
v = bool(a)
if not v:
return False
return True
class BoolOr(BoolOperand):
reprsymbol = "|"
def __bool__(self):
for a in self.args:
if isinstance(a, str):
v = boolVars[a]
else:
v = bool(a)
if v:
return True
return False
class BoolNot:
def __init__(self, t):
self.arg = t[0][1]
def __str__(self):
return f"~{self.arg}"
def __bool__(self):
if isinstance(self.arg, str):
v = boolVars[self.arg]
else:
v = bool(self.arg)
return not v
boolOperand = pp.Word(pp.alphas, max=1, as_keyword=True) | pp.one_of("True False")
# fmt: off
boolExpr = pp.infix_notation(
boolOperand,
[
("not", 1, pp.OpAssoc.RIGHT, BoolNot),
("and", 2, pp.OpAssoc.LEFT, BoolAnd),
("or", 2, pp.OpAssoc.LEFT, BoolOr),
],
)
# fmt: on
test = [
"p and not q",
"not not p",
"not(p and q)",
"q or not p and r",
"q or not p or not r",
"q or not (p and r)",
"p or q or r",
"p or q or r and False",
"(p or q or r) and False",
]
boolVars["p"] = True
boolVars["q"] = False
boolVars["r"] = True
print("p =", boolVars["p"])
print("q =", boolVars["q"])
print("r =", boolVars["r"])
print()
for t in test:
res = boolExpr.parse_string(t, parse_all=True)
print(t, "\n", res[0], "=", bool(res[0]), "\n")
expected = eval(t, {}, boolVars)
self.assertEqual(expected, bool(res[0]), f"failed boolean eval test {t}")
def testInfixNotationMinimalParseActionCalls(self):
count = 0
def evaluate_int(t):
nonlocal count
value = int(t[0])
print("evaluate_int", value)
count += 1
return value
integer = pp.Word(pp.nums).set_parse_action(evaluate_int)
variable = pp.Word(pp.alphas, exact=1)
operand = integer | variable
expop = pp.Literal("^")
signop = pp.one_of("+ -")
multop = pp.one_of("* /")
plusop = pp.one_of("+ -")
factop = pp.Literal("!")
# fmt: off
expr = pp.infix_notation(
operand,
[
(factop, 1, pp.OpAssoc.LEFT),
(expop, 2, pp.OpAssoc.LEFT),
(signop, 1, pp.OpAssoc.RIGHT),
(multop, 2, pp.OpAssoc.LEFT),
(plusop, 2, pp.OpAssoc.LEFT),
],
)
# fmt: on
test = ["9"]
for t in test:
count = 0
print(f"{t!r} => {expr.parse_string(t, parse_all=True)} (count={count})")
self.assertEqual(1, count, "count evaluated too many times!")
def testInfixNotationWithParseActions(self):
word = pp.Word(pp.alphas)
def supLiteral(s):
"""Returns the suppressed literal s"""
return pp.Literal(s).suppress()
def booleanExpr(atom):
ops = [
(supLiteral("!"), 1, pp.OpAssoc.RIGHT, lambda s, l, t: ["!", t[0][0]]),
(pp.one_of("= !="), 2, pp.OpAssoc.LEFT),
(supLiteral("&"), 2, pp.OpAssoc.LEFT, lambda s, l, t: ["&", t[0]]),
(supLiteral("|"), 2, pp.OpAssoc.LEFT, lambda s, l, t: ["|", t[0]]),
]
return pp.infix_notation(atom, ops)
f = booleanExpr(word) + pp.StringEnd()
tests = [
("bar = foo", [["bar", "=", "foo"]]),
(
"bar = foo & baz = fee",
["&", [["bar", "=", "foo"], ["baz", "=", "fee"]]],
),
]
for test, expected in tests:
print(test)
results = f.parse_string(test, parse_all=True)
print(results)
self.assertParseResultsEquals(results, expected_list=expected)
print()
def testInfixNotationGrammarTest5(self):
expop = pp.Literal("**")
signop = pp.one_of("+ -")
multop = pp.one_of("* /")
plusop = pp.one_of("+ -")
class ExprNode:
def __init__(self, tokens):
self.tokens = tokens[0]
def eval(self):
return None
class NumberNode(ExprNode):
def eval(self):
return self.tokens
class SignOp(ExprNode):
def eval(self):
mult = {"+": 1, "-": -1}[self.tokens[0]]
return mult * self.tokens[1].eval()
class BinOp(ExprNode):
opn_map = {}
def eval(self):
ret = self.tokens[0].eval()
for op, operand in zip(self.tokens[1::2], self.tokens[2::2]):
ret = self.opn_map[op](ret, operand.eval())
return ret
class ExpOp(BinOp):
opn_map = {"**": lambda a, b: b**a}
class MultOp(BinOp):
import operator
opn_map = {"*": operator.mul, "/": operator.truediv}
class AddOp(BinOp):
import operator
opn_map = {"+": operator.add, "-": operator.sub}
operand = ppc.number().set_parse_action(NumberNode)
# fmt: off
expr = pp.infix_notation(
operand,
[
(expop, 2, pp.OpAssoc.LEFT, (lambda pr: [pr[0][::-1]], ExpOp)),
(signop, 1, pp.OpAssoc.RIGHT, SignOp),
(multop, 2, pp.OpAssoc.LEFT, MultOp),
(plusop, 2, pp.OpAssoc.LEFT, AddOp),
],
)
# fmt: on
tests = """\
2+7
2**3
2**3**2
3**9
3**3**2
"""
for t in tests.splitlines():
t = t.strip()
if not t:
continue
parsed = expr.parse_string(t, parse_all=True)
eval_value = parsed[0].eval()
self.assertEqual(
eval(t),
eval_value,
f"Error evaluating {t!r}, expected {eval(t)!r}, got {eval_value!r}",
)
def testInfixNotationExceptions(self):
num = pp.Word(pp.nums)
# fmt: off
# arity 3 with None opExpr - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infix_notation(
num,
[
(None, 3, pp.OpAssoc.LEFT),
]
)
# arity 3 with invalid tuple - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infix_notation(
num,
[
(("+", "-", "*"), 3, pp.OpAssoc.LEFT),
]
)
# left arity > 3 - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infix_notation(
num,
[
("*", 4, pp.OpAssoc.LEFT),
]
)
# right arity > 3 - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infix_notation(
num,
[
("*", 4, pp.OpAssoc.RIGHT),
]
)
# assoc not from OpAssoc - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infix_notation(
num,
[
("*", 2, "LEFT"),
]
)
# fmt: on
def testInfixNotationWithNonOperators(self):
# left arity 2 with None expr
# right arity 2 with None expr
num = pp.Word(pp.nums).add_parse_action(pp.token_map(int))
ident = ppc.identifier()
# fmt: off
for assoc in (pp.OpAssoc.LEFT, pp.OpAssoc.RIGHT):
expr = pp.infix_notation(
num | ident,
[
(None, 2, assoc),
("+", 2, pp.OpAssoc.LEFT),
]
)
self.assertParseAndCheckList(expr, "3x+2", [[[3, "x"], "+", 2]])
# fmt: on
def testInfixNotationTernaryOperator(self):
# left arity 3
# right arity 3
num = pp.Word(pp.nums).add_parse_action(pp.token_map(int))
# fmt: off
for assoc in (pp.OpAssoc.LEFT, pp.OpAssoc.RIGHT):
expr = pp.infix_notation(
num,
[
("+", 2, pp.OpAssoc.LEFT),
(("?", ":"), 3, assoc),
]
)
self.assertParseAndCheckList(
expr, "3 + 2? 12: 13", [[[3, "+", 2], "?", 12, ":", 13]]
)
# fmt: on
def testInfixNotationWithAlternateParenSymbols(self):
num = pp.Word(pp.nums).add_parse_action(pp.token_map(int))
# fmt: off
expr = pp.infix_notation(
num,
[
("+", 2, pp.OpAssoc.LEFT),
],
lpar="(",
rpar=")",
)
self.assertParseAndCheckList(
expr, "3 + (2 + 11)", [[3, '+', [2, '+', 11]]]
)
expr = pp.infix_notation(
num,
[
("+", 2, pp.OpAssoc.LEFT),
],
lpar="<",
rpar=">",
)
self.assertParseAndCheckList(
expr, "3 + <2 + 11>", [[3, '+', [2, '+', 11]]]
)
expr = pp.infix_notation(
num,
[
("+", 2, pp.OpAssoc.LEFT),
],
lpar=pp.Literal("<"),
rpar=pp.Literal(">"),
)
self.assertParseAndCheckList(
expr, "3 + <2 + 11>", [[3, '+', ['<', [2, '+', 11], '>']]]
)
expr = pp.infix_notation(
num,
[
("+", 2, pp.OpAssoc.LEFT),
],
lpar=pp.Literal("<<"),
rpar=pp.Literal(">>"),
)
self.assertParseAndCheckList(
expr, "3 + <<2 + 11>>", [[3, '+', ['<<', [2, '+', 11], '>>']]]
)
# fmt: on
def testParseResultsPickle(self):
import pickle
# test 1
body = pp.make_html_tags("BODY")[0]
result = body.parse_string(
"<BODY BGCOLOR='#00FFBB' FGCOLOR=black>", parse_all=True
)
print(result.dump())
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
print("Test pickle dump protocol", protocol)
try:
pickleString = pickle.dumps(result, protocol)
except Exception as e:
print("dumps exception:", e)
newresult = pp.ParseResults()
else:
newresult = pickle.loads(pickleString)
print(newresult.dump())
self.assertEqual(
result.dump(),
newresult.dump(),
f"Error pickling ParseResults object (protocol={protocol})",
)
def testParseResultsPickle2(self):
import pickle
word = pp.Word(pp.alphas + "'.")
salutation = pp.OneOrMore(word)
comma = pp.Literal(",")
greetee = pp.OneOrMore(word)
endpunc = pp.one_of("! ?")
greeting = (
salutation("greeting")
+ pp.Suppress(comma)
+ greetee("greetee")
+ endpunc("punc*")[1, ...]
)
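        # a trailing "*" in a results name ("punc*") accumulates all matches
        # under that name, instead of keeping only the last match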
string = "Good morning, Miss Crabtree!"
result = greeting.parse_string(string, parse_all=True)
self.assertParseResultsEquals(
result,
["Good", "morning", "Miss", "Crabtree", "!"],
{
"greeting": ["Good", "morning"],
"greetee": ["Miss", "Crabtree"],
"punc": ["!"],
},
)
print(result.dump())
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
print("Test pickle dump protocol", protocol)
try:
pickleString = pickle.dumps(result, protocol)
except Exception as e:
print("dumps exception:", e)
newresult = pp.ParseResults()
else:
newresult = pickle.loads(pickleString)
print(newresult.dump())
self.assertEqual(
newresult.dump(),
result.dump(),
f"failed to pickle/unpickle ParseResults: expected {result!r}, got {newresult!r}",
)
def testParseResultsPickle3(self):
import pickle
# result with aslist=False
res_not_as_list = pp.Word("ABC").parse_string("BABBAB", parse_all=True)
# result with aslist=True
res_as_list = pp.Group(pp.Word("ABC")).parse_string("BABBAB", parse_all=True)
# result with modal=True
res_modal = pp.Word("ABC")("name").parse_string("BABBAB", parse_all=True)
# self.assertTrue(res_modal._modal)
# result with modal=False
res_not_modal = pp.Word("ABC")("name*").parse_string("BABBAB", parse_all=True)
# self.assertFalse(res_not_modal._modal)
for result in (res_as_list, res_not_as_list, res_modal, res_not_modal):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
print("Test pickle dump protocol", protocol)
try:
pickleString = pickle.dumps(result, protocol)
except Exception as e:
print("dumps exception:", e)
newresult = pp.ParseResults()
else:
newresult = pickle.loads(pickleString)
print(newresult.dump())
self.assertEqual(
newresult.dump(),
result.dump(),
f"failed to pickle/unpickle ParseResults: expected {result!r}, got {newresult!r}",
)
def testParseResultsInsertWithResultsNames(self):
test_string = "1 2 3 dice rolled first try"
wd = pp.Word(pp.alphas)
num = ppc.number
expr = (
pp.Group(num[1, ...])("nums")
+ wd("label")
+ pp.Group(wd[...])("additional")
)
result = expr.parse_string(test_string, parse_all=True)
print("Pre-insert")
print(result.dump())
result.insert(1, sum(result.nums))
print("\nPost-insert")
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=[[1, 2, 3], 6, "dice", ["rolled", "first", "try"]],
expected_dict={
"additional": ["rolled", "first", "try"],
"label": "dice",
"nums": [1, 2, 3],
},
)
def testParseResultsStringListUsingCombine(self):
test_string = "1 2 3 dice rolled first try"
wd = pp.Word(pp.alphas)
num = ppc.number
expr = pp.Combine(
pp.Group(num[1, ...])("nums")
+ wd("label")
+ pp.Group(wd[...])("additional"),
join_string="/",
adjacent=False,
)
self.assertEqual(
"123/dice/rolledfirsttry", expr.parse_string(test_string, parse_all=True)[0]
)
def testParseResultsAcceptingACollectionTypeValue(self):
# from Issue #276 - ParseResults parameterizes generic types if passed as the value of toklist parameter
# https://github.com/pyparsing/pyparsing/issues/276?notification_referrer_id=MDE4Ok5vdGlmaWNhdGlvblRocmVhZDE4MzU4NDYwNzI6MzgzODc1
#
# behavior of ParseResults code changed with Python 3.9
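        # a type object passed as toklist should be stored as the named value
        # itself, not instantiated or parameterized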
results_with_int = pp.ParseResults(toklist=int, name="type_", aslist=False)
self.assertEqual(int, results_with_int["type_"])
results_with_tuple = pp.ParseResults(toklist=tuple, name="type_", aslist=False)
self.assertEqual(tuple, results_with_tuple["type_"])
def testParseResultsReturningDunderAttribute(self):
# from Issue #208
parser = pp.Word(pp.alphas)("A")
result = parser.parse_string("abc", parse_all=True)
print(result.dump())
self.assertEqual("abc", result.A)
self.assertEqual("", result.B)
with self.assertRaises(AttributeError):
result.__xyz__
def testParseResultsNamedResultWithEmptyString(self):
# from Issue #470
# Check which values can be returned from a parse action
for test_value, expected_in_result_by_name in [
("x", True),
("", True),
(True, True),
(False, True),
(1, True),
(0, True),
(None, True),
(b"", True),
(b"a", True),
([], False),
((), False),
]:
msg = (
f"value = {test_value!r},"
f" expected X {'not ' if not expected_in_result_by_name else ''}in result"
)
with self.subTest(msg):
print(msg)
grammar = (
(pp.Suppress("a") + pp.ZeroOrMore("x"))
.add_parse_action(lambda p: test_value)
.set_results_name("X")
)
result = grammar.parse_string("a")
print(result.dump())
if expected_in_result_by_name:
self.assertIn(
"X",
result,
f"Expected X not found for parse action value {test_value!r}",
)
print(repr(result["X"]))
else:
self.assertNotIn(
"X",
result,
f"Unexpected X found for parse action value {test_value!r}",
)
with self.assertRaises(KeyError):
print(repr(result["X"]))
print()
# Do not add a parse result.
msg = "value = <no parse action defined>, expected X in result"
with self.subTest(msg):
print(msg)
grammar = (pp.Suppress("a") + pp.ZeroOrMore("x")).set_results_name("X")
result = grammar.parse_string("a")
print(result.dump())
self.assertIn("X", result, f"Expected X not found with no parse action")
print()
# Test by directly creating a ParseResults
print("Create empty string value directly")
result = pp.ParseResults("", name="X")
print(result.dump())
self.assertIn(
"X",
result,
"failed to construct ParseResults with named value using empty string",
)
print(repr(result["X"]))
print()
print("Create empty string value from a dict")
result = pp.ParseResults.from_dict({"X": ""})
print(result.dump())
self.assertIn(
"X",
result,
"failed to construct ParseResults with named value using from_dict",
)
print(repr(result["X"]))
def testMatchOnlyAtCol(self):
"""successfully use match_only_at_col helper function"""
expr = pp.Word(pp.nums)
expr.set_parse_action(pp.match_only_at_col(5))
largerExpr = pp.ZeroOrMore(pp.Word("A")) + expr + pp.ZeroOrMore(pp.Word("A"))
res = largerExpr.parse_string("A A 3 A", parse_all=True)
print(res.dump())
def testMatchOnlyAtColErr(self):
"""raise a ParseException in match_only_at_col with incorrect col"""
expr = pp.Word(pp.nums)
expr.set_parse_action(pp.match_only_at_col(1))
largerExpr = pp.ZeroOrMore(pp.Word("A")) + expr + pp.ZeroOrMore(pp.Word("A"))
with self.assertRaisesParseException():
largerExpr.parse_string("A A 3 A", parse_all=True)
def testParseResultsWithNamedTuple(self):
expr = pp.Literal("A")("Achar")
expr.set_parse_action(pp.replace_with(tuple(["A", "Z"])))
res = expr.parse_string("A", parse_all=True)
print(repr(res))
print(res.Achar)
self.assertParseResultsEquals(
res,
expected_dict={"Achar": ("A", "Z")},
msg=f"Failed accessing named results containing a tuple, got {res.Achar!r}",
)
def testParserElementAddOperatorWithOtherTypes(self):
"""test the overridden "+" operator with other data types"""
# ParserElement + str
with self.subTest():
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") + "suf"
result = expr.parse_string("spam eggs suf", parse_all=True)
print(result)
expected_l = ["spam", "eggs", "suf"]
self.assertParseResultsEquals(
result, expected_l, msg="issue with ParserElement + str"
)
# str + ParserElement
with self.subTest():
expr = "pre" + pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parse_string("pre spam eggs", parse_all=True)
print(result)
expected_l = ["pre", "spam", "eggs"]
self.assertParseResultsEquals(
result, expected_l, msg="issue with str + ParserElement"
)
# ParserElement + int
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn ParserElement + int"):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") + 12
self.assertEqual(expr, None)
# int + ParserElement
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn int + ParserElement"):
expr = 12 + pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
self.assertEqual(expr, None)
def testParserElementSubOperatorWithOtherTypes(self):
"""test the overridden "-" operator with other data types"""
# ParserElement - str
with self.subTest():
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") - "suf"
result = expr.parse_string("spam eggs suf", parse_all=True)
print(result)
expected = ["spam", "eggs", "suf"]
self.assertParseResultsEquals(
result, expected, msg="issue with ParserElement - str"
)
# str - ParserElement
with self.subTest():
expr = "pre" - pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parse_string("pre spam eggs", parse_all=True)
print(result)
expected = ["pre", "spam", "eggs"]
self.assertParseResultsEquals(
result, expected, msg="issue with str - ParserElement"
)
# ParserElement - int
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn ParserElement - int"):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") - 12
self.assertEqual(expr, None)
# int - ParserElement
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn int - ParserElement"):
expr = 12 - pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
self.assertEqual(expr, None)
def testParserElementMulOperatorWithTuples(self):
"""test ParserElement "*" with various tuples"""
# ParserElement * (None, n)
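        # a (None, n) multiplier is equivalent to (0, n): up to n optional matches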
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (None, 3)
with self.subTest():
results1 = expr.parse_string("spam", parse_all=True)
print(results1.dump())
expected = ["spam"]
self.assertParseResultsEquals(
results1, expected, msg="issue with ParserElement * w/ optional matches"
)
with self.subTest():
results2 = expr.parse_string("spam 12 23 34", parse_all=True)
print(results2.dump())
expected = ["spam", "12", "23", "34"]
self.assertParseResultsEquals(
results2, expected, msg="issue with ParserElement * w/ optional matches"
)
# ParserElement * (1, 1)
with self.subTest():
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (1, 1)
results = expr.parse_string("spam 45", parse_all=True)
print(results.dump())
expected = ["spam", "45"]
self.assertParseResultsEquals(
results, expected, msg="issue with ParserElement * (1, 1)"
)
# ParserElement * (1, 1+n)
with self.subTest():
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (1, 3)
results1 = expr.parse_string("spam 100", parse_all=True)
print(results1.dump())
expected = ["spam", "100"]
self.assertParseResultsEquals(
results1, expected, msg="issue with ParserElement * (1, 1+n)"
)
with self.subTest():
results2 = expr.parse_string("spam 100 200 300", parse_all=True)
print(results2.dump())
expected = ["spam", "100", "200", "300"]
self.assertParseResultsEquals(
results2, expected, msg="issue with ParserElement * (1, 1+n)"
)
# ParserElement * (lesser, greater)
with self.subTest():
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (2, 3)
results1 = expr.parse_string("spam 1 2", parse_all=True)
print(results1.dump())
expected = ["spam", "1", "2"]
self.assertParseResultsEquals(
results1, expected, msg="issue with ParserElement * (lesser, greater)"
)
with self.subTest():
results2 = expr.parse_string("spam 1 2 3", parse_all=True)
print(results2.dump())
expected = ["spam", "1", "2", "3"]
self.assertParseResultsEquals(
results2, expected, msg="issue with ParserElement * (lesser, greater)"
)
# ParserElement * (greater, lesser)
with self.subTest():
with self.assertRaises(
ValueError, msg="ParserElement * (greater, lesser) should raise error"
):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second") * (3, 2)
# ParserElement * (str, str)
with self.subTest():
with self.assertRaises(
TypeError, msg="ParserElement * (str, str) should raise error"
):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second") * (
"2",
"3",
)
def testParserElementMulByZero(self):
alpwd = pp.Word(pp.alphas)
numwd = pp.Word(pp.nums)
test_string = "abd def ghi jkl"
with self.subTest():
parser = alpwd * 2 + numwd * 0 + alpwd * 2
self.assertParseAndCheckList(
parser, test_string, expected_list=test_string.split()
)
with self.subTest():
parser = alpwd * 2 + numwd * (0, 0) + alpwd * 2
self.assertParseAndCheckList(
parser, test_string, expected_list=test_string.split()
)
def testParserElementMulOperatorWithOtherTypes(self):
"""test the overridden "*" operator with other data types"""
# ParserElement * str
with self.subTest():
with self.assertRaises(
TypeError, msg="ParserElement * str should raise error"
):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second") * "3"
# str * ParserElement
with self.subTest():
with self.assertRaises(
TypeError, msg="str * ParserElement should raise error"
):
expr = pp.Word(pp.alphas)("first") + "3" * pp.Word(pp.nums)("second")
# ParserElement * int
with self.subTest():
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * 2
results = expr.parse_string("spam 11 22", parse_all=True)
print(results.dump())
expected = ["spam", "11", "22"]
self.assertParseResultsEquals(
results, expected, msg="issue with ParserElement * int"
)
# int * ParserElement
with self.subTest():
expr = pp.Word(pp.alphas)("first") + 2 * pp.Word(pp.nums)("second*")
results = expr.parse_string("spam 111 222", parse_all=True)
print(results.dump())
expected = ["spam", "111", "222"]
self.assertParseResultsEquals(
results, expected, msg="issue with int * ParserElement"
)
def testParserElementMatchFirstOperatorWithOtherTypes(self):
"""test the overridden "|" operator with other data types"""
# ParserElement | int
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn ParserElement | int"):
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas)("second") | 12)
self.assertEqual(expr, None)
# int | ParserElement
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn int | ParserElement"):
expr = pp.Word(pp.alphas)("first") + (12 | pp.Word(pp.alphas)("second"))
self.assertEqual(expr, None)
def testParserElementMatchLongestWithOtherTypes(self):
"""test the overridden "^" operator with other data types"""
# ParserElement ^ str
with self.subTest():
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.nums)("second") ^ "eggs")
result = expr.parse_string("spam eggs", parse_all=True)
print(result)
expected = ["spam", "eggs"]
self.assertParseResultsEquals(
result, expected, msg="issue with ParserElement ^ str"
)
# str ^ ParserElement
with self.subTest():
expr = ("pre" ^ pp.Word("pr")("first")) + pp.Word(pp.alphas)("second")
result = expr.parse_string("pre eggs", parse_all=True)
print(result)
expected = ["pre", "eggs"]
self.assertParseResultsEquals(
result, expected, msg="issue with str ^ ParserElement"
)
# ParserElement ^ int
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn ParserElement ^ int"):
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas)("second") ^ 54)
self.assertEqual(expr, None)
# int ^ ParserElement
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn int ^ ParserElement"):
expr = pp.Word(pp.alphas)("first") + (65 ^ pp.Word(pp.alphas)("second"))
self.assertEqual(expr, None)
def testParserElementEachOperatorWithOtherTypes(self):
"""test the overridden "&" operator with other data types"""
# ParserElement & str
with self.subTest():
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas)("second") & "and")
with self.assertRaisesParseException(msg="issue with ParserElement & str"):
result = expr.parse_string("spam and eggs", parse_all=True)
# str & ParserElement
with self.subTest():
expr = pp.Word(pp.alphas)("first") + ("and" & pp.Word(pp.alphas)("second"))
result = expr.parse_string("spam and eggs", parse_all=True)
print(result.dump())
expected_l = ["spam", "and", "eggs"]
expected_d = {"first": "spam", "second": "eggs"}
self.assertParseResultsEquals(
result,
expected_list=expected_l,
expected_dict=expected_d,
msg="issue with str & ParserElement",
)
# ParserElement & int
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn ParserElement & int"):
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas) & 78)
self.assertEqual(expr, None)
# int & ParserElement
with self.subTest():
expr = None
with self.assertRaises(TypeError, msg="failed to warn int & ParserElement"):
expr = pp.Word(pp.alphas)("first") + (89 & pp.Word(pp.alphas))
self.assertEqual(expr, None)
def testLshiftOperatorWithOtherTypes(self):
# Forward << ParserElement
with self.subTest():
f = pp.Forward()
f << pp.Word(pp.alphas)[...]
test_string = "sljdf sldkjf Ljs"
result = f.parse_string(test_string)
print(result)
self.assertEqual(test_string.split(), result.as_list())
# Forward << str
with self.subTest():
f = pp.Forward()
f << "AAA"
test_string = "AAA"
result = f.parse_string(test_string)
print(result)
self.assertEqual(test_string.split(), result.as_list())
# Forward << int
with self.subTest():
f = pp.Forward()
            with self.assertRaises(TypeError, msg="failed to warn Forward << int"):
f << 12
def testParserElementPassedThreeArgsToMultiplierShorthand(self):
"""test the ParserElement form expr[m,n,o]"""
with self.assertRaises(
TypeError, msg="failed to warn three index arguments to expr[m, n, o]"
):
expr = pp.Word(pp.alphas)[2, 3, 4]
def testParserElementPassedStrToMultiplierShorthand(self):
"""test the ParserElement form expr[str]"""
with self.assertRaises(
TypeError, msg="failed to raise expected error using string multiplier"
):
expr2 = pp.Word(pp.alphas)["2"]
def testParseResultsNewEdgeCases(self):
"""test less common paths of ParseResults.__new__()"""
parser = pp.Word(pp.alphas)[...]
result = parser.parse_string("sldkjf sldkjf", parse_all=True)
# hasattr uses __getattr__, which for ParseResults will return "" if the
# results name is not defined. So hasattr() won't work with ParseResults.
# Have to use __contains__ instead to test for existence.
# self.assertFalse(hasattr(result, "A"))
self.assertFalse("A" in result)
# create new ParseResults w/ None
result1 = pp.ParseResults(None)
print(result1.dump())
self.assertParseResultsEquals(
result1, [], msg="ParseResults(None) should return empty ParseResults"
)
# create new ParseResults w/ integer name
result2 = pp.ParseResults(name=12)
print(result2.dump())
self.assertEqual(
"12",
result2.get_name(),
"ParseResults int name should be accepted and converted to str",
)
# create new ParseResults w/ generator type
gen = (a for a in range(1, 6))
result3 = pp.ParseResults(gen)
print(result3.dump())
expected3 = [1, 2, 3, 4, 5]
self.assertParseResultsEquals(
result3, expected3, msg="issue initializing ParseResults w/ gen type"
)
def testParseResultsReversed(self):
"""test simple case of reversed(ParseResults)"""
tst = "1 2 3 4 5"
expr = pp.OneOrMore(pp.Word(pp.nums))
result = expr.parse_string(tst, parse_all=True)
reversed_list = [ii for ii in reversed(result)]
print(reversed_list)
expected = ["5", "4", "3", "2", "1"]
self.assertEqual(
expected, reversed_list, msg="issue calling reversed(ParseResults)"
)
def testParseResultsValues(self):
"""test simple case of ParseResults.values()"""
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parse_string("spam eggs", parse_all=True)
values_set = set(result.values())
print(values_set)
expected = {"spam", "eggs"}
self.assertEqual(
expected, values_set, msg="issue calling ParseResults.values()"
)
def testParseResultsAppend(self):
"""test simple case of ParseResults.append()"""
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
expr = pp.OneOrMore(pp.Word(pp.nums)).add_parse_action(append_sum)
result = expr.parse_string("0 123 321", parse_all=True)
expected = ["0", "123", "321", 444]
print(result.dump())
self.assertParseResultsEquals(
result, expected, msg="issue with ParseResults.append()"
)
def testParseResultsClear(self):
"""test simple case of ParseResults.clear()"""
tst = "spam eggs"
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parse_string(tst, parse_all=True)
print(result.dump())
self.assertParseResultsEquals(
result, ["spam", "eggs"], msg="issue with ParseResults before clear()"
)
result.clear()
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=[],
expected_dict={},
msg="issue with ParseResults.clear()",
)
def testParseResultsExtendWithString(self):
"""test ParseResults.extend() with input of type str"""
# use a parse action to append the reverse of the matched strings to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
tst = "abc def ghi"
expr = pp.OneOrMore(pp.Word(pp.alphas))
result = expr.add_parse_action(make_palindrome).parse_string(tst, parse_all=True)
print(result.dump())
expected = ["abc", "def", "ghi", "ihg", "fed", "cba"]
self.assertParseResultsEquals(
result, expected, msg="issue with ParseResults.extend(str)"
)
def testParseResultsExtendWithParseResults(self):
"""test ParseResults.extend() with input of type ParseResults"""
expr = pp.OneOrMore(pp.Word(pp.alphas))
result1 = expr.parse_string("spam eggs", parse_all=True)
result2 = expr.parse_string("foo bar", parse_all=True)
result1.extend(result2)
print(result1.dump())
expected = ["spam", "eggs", "foo", "bar"]
self.assertParseResultsEquals(
result1, expected, msg="issue with ParseResults.extend(ParseResults)"
)
def testQuotedStringLoc(self):
expr = pp.QuotedString("'")
expr.add_parse_action(lambda t: t[0].upper())
test_string = "Using 'quotes' for 'sarcasm' or 'emphasis' is not good 'style'."
transformed = expr.transform_string(test_string)
print(test_string)
print(transformed)
expected = re.sub(r"'([^']+)'", lambda match: match[1].upper(), test_string)
self.assertEqual(expected, transformed)
def testParseResultsWithNestedNames(self):
from pyparsing import (
Dict,
Literal,
Group,
Optional,
Regex,
QuotedString,
one_of,
Or,
CaselessKeyword,
ZeroOrMore,
)
RELATION_SYMBOLS = "= > < >= <= <> =="
def _set_info(string, location, tokens):
for t in tokens:
try:
t["_info_"] = (string, location)
except TypeError:
pass
tokens["_info_"] = (string, location)
def keywords(name):
words = "any all within encloses adj".split()
return Or(map(CaselessKeyword, words))
charString1 = Group(Regex(r'[^()=<>"/\s]+'))("identifier")
charString1.add_parse_action(_set_info)
charString2 = Group(QuotedString('"', "\\"))("quoted")
charString2.add_parse_action(_set_info)
term = Group(charString1 | charString2)
modifier_key = charString1
# relations
comparitor_symbol = one_of(RELATION_SYMBOLS)
named_comparitors = keywords("comparitors")
comparitor = Group(comparitor_symbol | named_comparitors)("comparitor")
comparitor.add_parse_action(_set_info)
def modifier_list1(key):
modifier = Dict(
Literal("/")
+ Group(modifier_key(key))("name")
+ Optional(comparitor_symbol("symbol") + term("value"))
)("modifier")
modifier.add_parse_action(_set_info)
return ZeroOrMore(modifier)("modifier_list")
def modifier_list2(key):
modifier = Dict(
Literal("/")
+ Group(modifier_key(key))("name")
+ Optional(comparitor_symbol("symbol") + term("value")),
asdict=True,
)("modifier")
modifier.add_parse_action(_set_info)
return ZeroOrMore(modifier)("modifier_list")
def modifier_list3(key):
modifier = Group( # this line is different from the others, must group to get results names
Dict(
Literal("/")
+ Group(modifier_key(key))("name")
+ Optional(comparitor_symbol("symbol") + term("value"))
)
)
modifier.add_parse_action(_set_info)
return ZeroOrMore(modifier)("modifier_list")
def modifier_list4(key):
modifier = Dict(
Literal("/")
+ Group(modifier_key(key))("name")
+ Optional(comparitor_symbol("symbol") + term("value")),
asdict=True,
)
modifier.add_parse_action(_set_info)
return ZeroOrMore(modifier)("modifier_list")
for modifier_list_fn in (
modifier_list1,
modifier_list2,
modifier_list3,
modifier_list4,
):
modifier_parser = modifier_list_fn("default")
result = modifier_parser.parse_string(
"/respectaccents/ignoreaccents", parse_all=True
)
for r in result:
print(r)
print(r.get("_info_"))
self.assertEqual([0, 15], [r["_info_"][1] for r in result])
def testParseResultsFromDict(self):
"""test helper classmethod ParseResults.from_dict()"""
dict = {
"first": "123",
"second": 456,
"third": {"threeStr": "789", "threeInt": 789},
}
name = "trios"
result = pp.ParseResults.from_dict(dict, name=name)
print(result.dump())
expected = {name: dict}
self.assertParseResultsEquals(
result,
expected_dict=expected,
msg="issue creating ParseResults.from _dict()",
)
def testParseResultsDir(self):
"""test dir(ParseResults)"""
dict = {"first": "123", "second": "456", "third": "789"}
name = "trios"
result = pp.ParseResults.from_dict(dict, name=name)
dir_result = dir(result)
print(dir_result)
self.assertIn(
name, dir_result, msg="name value wasn't returned by dir(ParseResults)"
)
self.assertIn(
"as_list", dir_result, msg="as_list was not returned by dir(ParseResults)"
)
def testParseResultsInsert(self):
"""test ParseResults.insert() with named tokens"""
from random import randint
result = pp.Word(pp.alphas)[...].parse_string(
"A B C D E F G H I J", parse_all=True
)
compare_list = result.as_list()
print(result)
print(compare_list)
for s in "abcdefghij":
index = randint(-5, 5)
result.insert(index, s)
compare_list.insert(index, s)
print(result)
print(compare_list)
self.assertParseResultsEquals(
result, compare_list, msg="issue with ParseResults.insert()"
)
def testParseResultsAddingSuppressedTokenWithResultsName(self):
parser = "aaa" + (pp.NoMatch() | pp.Suppress("-"))("B")
try:
dd = parser.parse_string("aaa -").as_dict()
except RecursionError:
self.fail("fail getting named result when empty")
def testParseResultsBool(self):
result = pp.Word(pp.alphas)[...].parse_string("AAA", parse_all=True)
self.assertTrue(result, "non-empty ParseResults evaluated as False")
result = pp.Word(pp.alphas)[...].parse_string("", parse_all=True)
self.assertFalse(result, "empty ParseResults evaluated as True")
result["A"] = 0
self.assertTrue(
result,
"ParseResults with empty list but containing a results name evaluated as False",
)
def testParseResultsWithAsListWithAndWithoutFlattening(self):
ppc = pp.common
# define a recursive grammar so we can easily build nested ParseResults
LPAR, RPAR = pp.Suppress.using_each("()")
fn_call = pp.Forward()
fn_arg = fn_call | ppc.identifier | ppc.number
fn_call <<= ppc.identifier + pp.Group(LPAR + pp.Optional(pp.DelimitedList(fn_arg)) + RPAR)
tests = [
("random()", ["random", []]),
("sin(theta)", ["sin", ["theta"]]),
("sin(rad(30))", ["sin", ["rad", [30]]]),
("sin(rad(30), rad(60, 180))", ["sin", ["rad", [30], "rad", [60, 180]]]),
("sin(rad(30), rad(60, 180), alpha)", ["sin", ["rad", [30], "rad", [60, 180], "alpha"]]),
]
for test_string, expected in tests:
with self.subTest():
print(test_string)
observed = fn_call.parse_string(test_string, parse_all=True)
print(observed.as_list())
self.assertEqual(expected, observed.as_list())
print(observed.as_list(flatten=True))
self.assertEqual(flatten(expected), observed.as_list(flatten=True))
print()
def testParseResultsCopy(self):
expr = (
pp.Word(pp.nums)
+ pp.Group(pp.Word(pp.alphas)("key") + "=" + pp.Word(pp.nums)("value"))[...]
)
result = expr.parse_string("1 a=100 b=200 c=300")
print(result.dump())
r2 = result.copy()
print(r2.dump())
# check copy is different, but contained results is the same as in original
self.assertFalse(r2 is result, "copy failed")
self.assertTrue(r2[1] is result[1], "shallow copy failed")
# update result sub-element in place
result[1][0] = "z"
self.assertParseResultsEquals(
result,
expected_list=[
"1",
["z", "=", "100"],
["b", "=", "200"],
["c", "=", "300"],
],
)
# update contained results, verify list and dict contents are updated as expected
result[1][0] = result[1]["key"] = "q"
result[1]["xyz"] = 1000
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=[
"1",
["q", "=", "100"],
["b", "=", "200"],
["c", "=", "300"],
],
)
self.assertParseResultsEquals(
result[1], expected_dict={"key": "q", "value": "100", "xyz": 1000}
)
# verify that list and dict contents are the same in copy
self.assertParseResultsEquals(
r2,
expected_list=[
"1",
["q", "=", "100"],
["b", "=", "200"],
["c", "=", "300"],
],
)
self.assertParseResultsEquals(
r2[1], expected_dict={"key": "q", "value": "100", "xyz": 1000}
)
def testParseResultsDeepcopy(self):
expr = (
pp.Word(pp.nums)
+ pp.Group(pp.Word(pp.alphas)("key") + "=" + pp.Word(pp.nums)("value"))[...]
)
result = expr.parse_string("1 a=100 b=200 c=300")
orig_elements = result._toklist[:]
r2 = result.deepcopy()
print(r2.dump())
# check copy and contained results are different from original
self.assertFalse(r2 is result, "copy failed")
self.assertFalse(r2[1] is result[1], "deep copy failed")
# check copy and original are equal
self.assertEqual(result.as_dict(), r2.as_dict())
self.assertEqual(result.as_list(), r2.as_list())
# check original is unchanged
self.assertTrue(
all(
orig_element is result_element
for orig_element, result_element in zip(orig_elements, result._toklist)
)
)
# update contained results
result[1][0] = result[1]["key"] = "q"
result[1]["xyz"] = 1000
print(result.dump())
# verify that list and dict contents are unchanged in the copy
self.assertParseResultsEquals(
r2,
expected_list=[
"1",
["a", "=", "100"],
["b", "=", "200"],
["c", "=", "300"],
],
)
self.assertParseResultsEquals(r2[1], expected_dict={"key": "a", "value": "100"})
def testParseResultsDeepcopy2(self):
expr = (
pp.Word(pp.nums)
+ pp.Group(
pp.Word(pp.alphas)("key") + "=" + pp.Word(pp.nums)("value"), aslist=True
)[...]
)
result = expr.parse_string("1 a=100 b=200 c=300")
r2 = result.deepcopy()
print(r2.dump())
# check copy and contained results are different from original
self.assertFalse(r2 is result, "copy failed")
self.assertFalse(r2[1] is result[1], "deep copy failed")
# update contained results
result[1][0] = "q"
print(result.dump())
# verify that list and dict contents are unchanged in the copy
self.assertParseResultsEquals(
r2,
expected_list=[
"1",
["a", "=", "100"],
["b", "=", "200"],
["c", "=", "300"],
],
)
def testParseResultsDeepcopy3(self):
expr = (
pp.Word(pp.nums)
+ pp.Group(
(
pp.Word(pp.alphas)("key") + "=" + pp.Word(pp.nums)("value")
).add_parse_action(lambda t: tuple(t))
)[...]
)
result = expr.parse_string("1 a=100 b=200 c=300")
r2 = result.deepcopy()
print(r2.dump())
# check copy and contained results are different from original
self.assertFalse(r2 is result, "copy failed")
self.assertFalse(r2[1] is result[1], "deep copy failed")
# update contained results
result[1][0] = "q"
print(result.dump())
# verify that list and dict contents are unchanged in the copy
self.assertParseResultsEquals(
r2,
expected_list=[
"1",
[("a", "=", "100")],
[("b", "=", "200")],
[("c", "=", "300")],
],
)
def testIgnoreString(self):
"""test ParserElement.ignore() passed a string arg"""
tst = "I like totally like love pickles"
expr = pp.Word(pp.alphas)[...].ignore("like")
result = expr.parse_string(tst, parse_all=True)
print(result)
expected = ["I", "totally", "love", "pickles"]
self.assertParseResultsEquals(result, expected, msg="issue with ignore(string)")
def testParseHTMLTags(self):
test = """
<BODY>
<BODY BGCOLOR="#00FFCC">
<BODY BGCOLOR="#00FFAA"/>
<BODY BGCOLOR='#00FFBB' FGCOLOR=black>
<BODY/>
</BODY>
"""
results = [
("startBody", False, "", ""),
("startBody", False, "#00FFCC", ""),
("startBody", True, "#00FFAA", ""),
("startBody", False, "#00FFBB", "black"),
("startBody", True, "", ""),
("endBody", False, "", ""),
]
bodyStart, bodyEnd = pp.make_html_tags("BODY")
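        # make_html_tags returns (start, end) tag expressions; attributes parsed
        # from the start tag are exposed as results names (bgcolor, fgcolor,
        # and the "empty" flag for self-closing tags)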
resIter = iter(results)
for t, s, e in (bodyStart | bodyEnd).scan_string(test):
print(test[s:e], "->", t)
(expectedType, expectedEmpty, expectedBG, expectedFG) = next(resIter)
print(t.dump())
if "startBody" in t:
self.assertEqual(
expectedEmpty,
bool(t.empty),
f"expected {expectedEmpty and 'empty' or 'not empty'} token,"
f" got {t.empty and 'empty' or 'not empty'}",
)
self.assertEqual(
expectedBG,
t.bgcolor,
f"failed to match BGCOLOR, expected {expectedBG}, got {t.bgcolor}",
)
self.assertEqual(
expectedFG,
t.fgcolor,
f"failed to match FGCOLOR, expected {expectedFG}, got {t.bgcolor}",
)
elif "endBody" in t:
print("end tag")
pass
else:
print("BAD!!!")
def testSetParseActionUncallableErr(self):
"""raise a TypeError in set_parse_action() by adding uncallable arg"""
expr = pp.Literal("A")("Achar")
uncallable = 12
with self.assertRaises(TypeError):
expr.set_parse_action(uncallable)
res = expr.parse_string("A", parse_all=True)
print(res.dump())
def testMulWithNegativeNumber(self):
"""raise a ValueError in __mul__ by multiplying a negative number"""
with self.assertRaises(ValueError):
pp.Literal("A")("Achar") * (-1)
def testMulWithEllipsis(self):
"""multiply an expression with Ellipsis as ``expr * ...`` to match ZeroOrMore"""
expr = pp.Literal("A")("Achar") * ...
res = expr.parse_string("A", parse_all=True)
self.assertEqual(["A"], res.as_list(), "expected expr * ... to match ZeroOrMore")
print(res.dump())
def testUpcaseDowncaseUnicode(self):
import sys
ppu = pp.pyparsing_unicode
a = "\u00bfC\u00f3mo esta usted?"
if not JYTHON_ENV:
ualphas = ppu.alphas
else:
ualphas = "".join(
chr(i)
for i in list(range(0xD800)) + list(range(0xE000, sys.maxunicode))
if chr(i).isalpha()
)
uword = pp.Word(ualphas).set_parse_action(ppc.upcase_tokens)
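        # locally silence print output for the remainder of this test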
print = lambda *args: None
print(uword.search_string(a))
uword = pp.Word(ualphas).set_parse_action(ppc.downcase_tokens)
print(uword.search_string(a))
kw = pp.Keyword("mykey", caseless=True).set_parse_action(ppc.upcase_tokens)(
"rname"
)
ret = kw.parse_string("mykey", parse_all=True)
print(ret.rname)
self.assertEqual(
"MYKEY", ret.rname, "failed to upcase with named result (pyparsing_common)"
)
kw = pp.Keyword("MYKEY", caseless=True).set_parse_action(ppc.downcase_tokens)(
"rname"
)
ret = kw.parse_string("mykey", parse_all=True)
print(ret.rname)
self.assertEqual("mykey", ret.rname, "failed to upcase with named result")
if not IRON_PYTHON_ENV:
# test html data
html = "<TR class=maintxt bgColor=#ffffff> \
<TD vAlign=top>Производитель, модель</TD> \
<TD vAlign=top><STRONG>BenQ-Siemens CF61</STRONG></TD> \
" # .decode('utf-8')
            # "Производитель, модель" means "Manufacturer, model"
text_manuf = "Производитель, модель"
manufacturer = pp.Literal(text_manuf)
td_start, td_end = pp.make_html_tags("td")
manuf_body = (
td_start.suppress()
+ manufacturer
+ pp.SkipTo(td_end)("cells*")
+ td_end.suppress()
)
def testRegexDeferredCompile(self):
"""test deferred compilation of Regex patterns"""
re_expr = pp.Regex(r"[A-Z]*")
self.assertIsNone(re_expr._may_return_empty, "failed to initialize _may_return_empty flag to None")
self.assertEqual(re_expr._re, None)
compiled = re_expr.re
self.assertTrue(re_expr._may_return_empty, "failed to set _may_return_empty flag to True")
self.assertEqual(re_expr._re, compiled)
non_empty_re_expr = pp.Regex(r"[A-Z]+")
self.assertIsNone(non_empty_re_expr._may_return_empty, "failed to initialize _may_return_empty flag to None")
self.assertEqual(non_empty_re_expr._re, None)
compiled = non_empty_re_expr.re
self.assertFalse(non_empty_re_expr._may_return_empty, "failed to set _may_return_empty flag to False")
self.assertEqual(non_empty_re_expr._re, compiled)
def testRegexDeferredCompileCommonHtmlEntity(self):
# this is the most important expression to defer, because it takes a long time to compile
perf_test_common_html_entity = pp.common_html_entity()
# force internal var to None, to simulate a fresh instance
perf_test_common_html_entity._re = None
# just how long does this take anyway?
from time import perf_counter
start = perf_counter()
perf_test_common_html_entity.re # noqa
elapsed = perf_counter() - start
print(f"elapsed time to compile common_html_entity: {elapsed:.4f} sec")
def testParseUsingRegex(self):
signedInt = pp.Regex(r"[-+][0-9]+")
unsignedInt = pp.Regex(r"[0-9]+")
simpleString = pp.Regex(r'("[^\"]*")|(\'[^\']*\')')
namedGrouping = pp.Regex(r'("(?P<content>[^\"]*)")')
compiledRE = pp.Regex(re.compile(r"[A-Z]+"))
def testMatch(expression, instring, shouldPass, expectedString=None):
if shouldPass:
try:
result = expression.parse_string(instring, parse_all=False)
print(f"{repr(expression)} correctly matched {repr(instring)}")
if expectedString != result[0]:
print("\tbut failed to match the pattern as expected:")
print(
f"\tproduced {repr(result[0])} instead of {repr(expectedString)}"
)
return False
return True
except pp.ParseException:
print(f"{expression!r} incorrectly failed to match {instring!r}")
else:
try:
result = expression.parse_string(instring, parse_all=False)
print(f"{expression!r} incorrectly matched {instring!r}")
print(f"\tproduced {result[0]!r} as a result")
except pp.ParseException:
print(f"{expression!r} correctly failed to match {instring!r}")
return True
return False
# These should fail
for i, (test_expr, test_string) in enumerate(
[
(signedInt, "1234 foo"),
(signedInt, " +foo"),
(unsignedInt, "abc"),
(unsignedInt, "+123 foo"),
(simpleString, "foo"),
(simpleString, "\"foo bar'"),
(simpleString, "'foo bar\""),
(compiledRE, "blah"),
],
            start=1
):
with self.subTest(test_expr=test_expr, test_string=test_string):
self.assertTrue(
testMatch(
test_expr,
test_string,
False,
),
f"Re: ({i}) passed, expected fail",
)
# These should pass
for i, (test_expr, test_string, expected_match) in enumerate(
[
(signedInt, " +123", "+123"),
(signedInt, "+123", "+123"),
(signedInt, "+123 foo", "+123"),
(signedInt, "-0 foo", "-0"),
(unsignedInt, "123 foo", "123"),
(unsignedInt, "0 foo", "0"),
(simpleString, '"foo"', '"foo"'),
(simpleString, "'foo bar' baz", "'foo bar'"),
(compiledRE, "BLAH", "BLAH"),
(namedGrouping, '"foo bar" baz', '"foo bar"'),
],
            start=i + 1
):
with self.subTest(test_expr=test_expr, test_string=test_string):
self.assertTrue(
testMatch(
test_expr,
test_string,
True,
expected_match,
),
f"Re: ({i}) failed, expected pass",
)
ret = namedGrouping.parse_string('"zork" blah', parse_all=False)
print(ret)
print(list(ret.items()))
print(ret.content)
self.assertEqual("zork", ret.content, "named group lookup failed")
self.assertEqual(
simpleString.parse_string('"zork" blah', parse_all=False)[0],
ret[0],
"Regex not properly returning ParseResults for named vs. unnamed groups",
)
try:
print("lets try an invalid RE")
invRe = pp.Regex("(\"[^\"]*\")|('[^']*'").re
except ValueError as e:
print("successfully rejected an invalid RE:", end=" ")
print(e)
else:
self.fail("failed to reject invalid RE")
with self.assertRaises(
ValueError, msg="failed to warn empty string passed to Regex"
):
pp.Regex("").re # noqa
def testRegexAsType(self):
test_str = "sldkjfj 123 456 lsdfkj"
print("return as list of match groups")
expr = pp.Regex(r"\w+ (\d+) (\d+) (\w+)", as_group_list=True)
expected_group_list = [tuple(test_str.split()[1:])]
result = expr.parse_string(test_str, parse_all=True)
print(result.dump())
print(expected_group_list)
self.assertParseResultsEquals(
result,
expected_list=expected_group_list,
msg="incorrect group list returned by Regex)",
)
print("return as re.match instance")
expr = pp.Regex(
r"\w+ (?P<num1>\d+) (?P<num2>\d+) (?P<last_word>\w+)", as_match=True
)
result = expr.parse_string(test_str, parse_all=True)
print(result.dump())
print(result[0].groups())
print(expected_group_list)
self.assertEqual(
{"num1": "123", "num2": "456", "last_word": "lsdfkj"},
result[0].groupdict(),
"invalid group dict from Regex(as_match=True)",
)
self.assertEqual(
expected_group_list[0],
result[0].groups(),
"incorrect group list returned by Regex(as_match)",
)
def testRegexSub(self):
print("test sub with string")
expr = pp.Regex(r"<title>").sub("'Richard III'")
result = expr.transform_string("This is the title: <title>")
print(result)
self.assertEqual(
"This is the title: 'Richard III'",
result,
"incorrect Regex.sub result with simple string",
)
print("test sub with re string")
expr = pp.Regex(r"([Hh]\d):\s*(.*)").sub(r"<\1>\2</\1>")
result = expr.transform_string(
"h1: This is the main heading\nh2: This is the sub-heading"
)
print(result)
self.assertEqual(
"<h1>This is the main heading</h1>\n<h2>This is the sub-heading</h2>",
result,
"incorrect Regex.sub result with re string",
)
print("test sub with re string (Regex returns re.match)")
expr = pp.Regex(r"([Hh]\d):\s*(.*)", as_match=True).sub(r"<\1>\2</\1>")
result = expr.transform_string(
"h1: This is the main heading\nh2: This is the sub-heading"
)
print(result)
self.assertEqual(
"<h1>This is the main heading</h1>\n<h2>This is the sub-heading</h2>",
result,
"incorrect Regex.sub result with re string",
)
print("test sub with callable that return str")
expr = pp.Regex(r"<(.*?)>").sub(lambda m: m.group(1).upper())
result = expr.transform_string("I want this in upcase: <what? what?>")
print(result)
self.assertEqual(
"I want this in upcase: WHAT? WHAT?",
result,
"incorrect Regex.sub result with callable",
)
with self.assertRaises(TypeError):
pp.Regex(r"<(.*?)>", as_match=True).sub(lambda m: m.group(1).upper())
with self.assertRaises(TypeError):
pp.Regex(r"<(.*?)>", as_group_list=True).sub(lambda m: m.group(1).upper())
with self.assertRaises(TypeError):
pp.Regex(r"<(.*?)>", as_group_list=True).sub("")
def testRegexInvalidType(self):
"""test Regex of an invalid type"""
with self.assertRaises(TypeError, msg="issue with Regex of type int"):
expr = pp.Regex(12)
def testRegexLoopPastEndOfString(self):
"""test Regex matching after end of string"""
NL = pp.LineEnd().suppress()
empty_line = pp.rest_of_line() + NL
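        # "\n\n" yields three matches: an empty rest_of_line before each of the two
        # newlines, plus one more at end of string, where LineEnd also matches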
result = empty_line[1, 10].parse_string("\n\n")
self.assertEqual(3, len(result))
def testPrecededBy(self):
num = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
interesting_num = pp.PrecededBy(pp.Char("abc")("prefix*")) + num
semi_interesting_num = pp.PrecededBy("_") + num
crazy_num = pp.PrecededBy(pp.Word("^", "$%^")("prefix*"), 10) + num
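        # the second argument (10) is the retreat - the maximum number of characters
        # PrecededBy will step back when matching a variable-width lookbehind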
boring_num = ~pp.PrecededBy(pp.Char("abc_$%^" + pp.nums)) + num
very_boring_num = pp.PrecededBy(pp.WordStart()) + num
finicky_num = pp.PrecededBy(pp.Word("^", "$%^"), retreat=3) + num
s = "c384 b8324 _9293874 _293 404 $%^$^%$2939"
print(s)
for expr, expected_list, expected_dict in [
(interesting_num, [384, 8324], {"prefix": ["c", "b"]}),
(semi_interesting_num, [9293874, 293], {}),
(boring_num, [404], {}),
(crazy_num, [2939], {"prefix": ["^%$"]}),
(finicky_num, [2939], {}),
(very_boring_num, [404], {}),
]:
# print(expr.search_string(s))
result = sum(expr.search_string(s))
print(result.dump())
self.assertParseResultsEquals(result, expected_list, expected_dict)
# infinite loop test - from Issue #127
string_test = "notworking"
# negs = pp.Or(['not', 'un'])('negs')
negs_pb = pp.PrecededBy("not", retreat=100)("negs_lb")
# negs_pb = pp.PrecededBy(negs, retreat=100)('negs_lb')
pattern = (negs_pb + pp.Literal("working"))("main")
results = pattern.search_string(string_test)
try:
print(results.dump())
except RecursionError:
self.fail("got maximum excursion limit exception")
else:
print("got maximum excursion limit exception")
def testCountedArray(self):
testString = "2 5 7 6 0 1 2 3 4 5 0 3 5 4 3"
integer = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
countedField = pp.counted_array(integer)
r = pp.OneOrMore(pp.Group(countedField)).parse_string(testString, parse_all=True)
print(testString)
print(r)
self.assertParseResultsEquals(
r, expected_list=[[5, 7], [0, 1, 2, 3, 4, 5], [], [5, 4, 3]]
)
# addresses bug raised by Ralf Vosseler
def testCountedArrayTest2(self):
testString = "2 5 7 6 0 1 2 3 4 5 0 3 5 4 3"
integer = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
countedField = pp.counted_array(integer)
dummy = pp.Word("A")
r = pp.OneOrMore(pp.Group(dummy ^ countedField)).parse_string(
testString, parse_all=True
)
print(testString)
print(r)
self.assertParseResultsEquals(
r, expected_list=[[5, 7], [0, 1, 2, 3, 4, 5], [], [5, 4, 3]]
)
def testCountedArrayTest3(self):
int_chars = "_" + pp.alphas
array_counter = pp.Word(int_chars).set_parse_action(
lambda t: int_chars.index(t[0])
)
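        # counts are encoded as characters, indexed into "_" + alphas:
        # "_" = 0, "A" = 1, "B" = 2, ... so B, F, _, C -> 2, 6, 0, 3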
# 123456789012345678901234567890
testString = "B 5 7 F 0 1 2 3 4 5 _ C 5 4 3"
integer = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
countedField = pp.counted_array(integer, int_expr=array_counter)
r = pp.OneOrMore(pp.Group(countedField)).parse_string(testString, parse_all=True)
print(testString)
print(r)
self.assertParseResultsEquals(
r, expected_list=[[5, 7], [0, 1, 2, 3, 4, 5], [], [5, 4, 3]]
)
def testCountedArrayTest4(self):
ppc = pp.pyparsing_common
# array counter contains several fields - first field *must* be the number of
# items in the array
# - number of elements
# - type of elements
# - source of elements
counter_with_metadata = (
ppc.integer("count") + ppc.identifier("type") + ppc.identifier("source")
)
countedField = pp.counted_array(
pp.Word(pp.alphanums), int_expr=counter_with_metadata
)
testString = (
"5 string input item1 item2 item3 item4 item5 0 int user 2 int file 3 8"
)
r = pp.Group(countedField("items"))[...].parse_string(testString, parse_all=True)
print(testString)
print(r.dump())
print(f"type = {r.type!r}")
print(f"source = {r.source!r}")
self.assertParseResultsEquals(
r,
expected_list=[
["item1", "item2", "item3", "item4", "item5"],
[],
["3", "8"],
],
)
self.assertParseResultsEquals(
r[0],
expected_dict={
"count": 5,
"source": "input",
"type": "string",
"items": ["item1", "item2", "item3", "item4", "item5"],
},
)
# parse with additional fields between the count and the actual list items
count_with_metadata = ppc.integer + pp.Word(pp.alphas)("type")
typed_array = pp.counted_array(
pp.Word(pp.alphanums), int_expr=count_with_metadata
)("items")
result = typed_array.parse_string("3 bool True True False", parse_all=True)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=["True", "True", "False"],
expected_dict={"type": "bool", "items": ["True", "True", "False"]},
)
def testLineStart(self):
pass_tests = [
"""\
AAA
BBB
""",
"""\
AAA...
BBB
""",
]
fail_tests = [
"""\
AAA...
...BBB
""",
"""\
AAA BBB
""",
]
# cleanup test strings
pass_tests = [
"\n".join(s.lstrip() for s in t.splitlines()).replace(".", " ")
for t in pass_tests
]
fail_tests = [
"\n".join(s.lstrip() for s in t.splitlines()).replace(".", " ")
for t in fail_tests
]
test_patt = pp.Word("A") - pp.LineStart() + pp.Word("B")
print(test_patt.streamline())
success, _ = test_patt.run_tests(pass_tests)
self.assertTrue(success, "failed LineStart passing tests (1)")
success, _ = test_patt.run_tests(fail_tests, failure_tests=True)
self.assertTrue(success, "failed LineStart failure mode tests (1)")
with ppt.reset_pyparsing_context():
print(r"no \n in default whitespace chars")
pp.ParserElement.set_default_whitespace_chars(" ")
test_patt = pp.Word("A") - pp.LineStart() + pp.Word("B")
print(test_patt.streamline())
# should fail the pass tests too, since \n is no longer valid whitespace and we aren't parsing for it
success, _ = test_patt.run_tests(pass_tests, failure_tests=True)
self.assertTrue(success, "failed LineStart passing tests (2)")
success, _ = test_patt.run_tests(fail_tests, failure_tests=True)
self.assertTrue(success, "failed LineStart failure mode tests (2)")
test_patt = (
pp.Word("A")
- pp.LineEnd().suppress()
+ pp.LineStart()
+ pp.Word("B")
+ pp.LineEnd().suppress()
)
print(test_patt.streamline())
success, _ = test_patt.run_tests(pass_tests)
self.assertTrue(success, "failed LineStart passing tests (3)")
success, _ = test_patt.run_tests(fail_tests, failure_tests=True)
self.assertTrue(success, "failed LineStart failure mode tests (3)")
def testLineStart2(self):
test = """\
AAA 1
AAA 2
AAA
B AAA
"""
test = dedent(test)
print(pp.testing.with_line_numbers(test))
print("normal parsing")
for t, s, e in (pp.LineStart() + "AAA").scan_string(test):
print(s, e, pp.lineno(s, test), pp.line(s, test), repr(t))
print()
self.assertEqual(
"A", t[0][0], "failed LineStart with insignificant newlines"
)
print(r"parsing without \n in whitespace chars")
with ppt.reset_pyparsing_context():
pp.ParserElement.set_default_whitespace_chars(" ")
for t, s, e in (pp.LineStart() + "AAA").scan_string(test):
print(s, e, pp.lineno(s, test), pp.line(s, test), repr(test[s]))
print()
self.assertEqual(
"A", t[0][0], "failed LineStart with insignificant newlines"
)
def testLineStartWithLeadingSpaces(self):
# testing issue #272
# reverted in 3.0.2 - LineStart() + expr will match expr even if there
# are leading spaces. To force "only at column 1" matching, use
# AtLineStart(expr).
instring = dedent(
"""
a
b
c
d
e
f
g
"""
)
print(pp.testing.with_line_numbers(instring))
alpha_line = (
pp.LineStart().leave_whitespace()
+ pp.Word(pp.alphas)
+ pp.LineEnd().suppress()
)
tests = [
alpha_line,
pp.Group(alpha_line),
alpha_line | pp.Word("_"),
alpha_line | alpha_line,
pp.MatchFirst([alpha_line, alpha_line]),
alpha_line ^ pp.Word("_"),
alpha_line ^ alpha_line,
pp.Or([alpha_line, pp.Word("_")]),
pp.LineStart() + pp.Word(pp.alphas) + pp.LineEnd().suppress(),
pp.And([pp.LineStart(), pp.Word(pp.alphas), pp.LineEnd().suppress()]),
]
fails = []
for test in tests:
print(test.search_string(instring))
if ["a", "b", "c", "d", "e", "f", "g"] != flatten(
sum(test.search_string(instring)).as_list()
):
fails.append(test)
if fails:
self.fail(
"failed LineStart tests:\n{}".format(
"\n".join(str(expr) for expr in fails)
)
)
def testAtLineStart(self):
test = dedent(
"""\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
"""
)
expr = pp.AtLineStart("AAA") + pp.rest_of_line
for t in expr.search_string(test):
print(t)
self.assertEqual(
["AAA", " this line", "AAA", " and this line"],
sum(expr.search_string(test)).as_list(),
)
def testStringStart(self):
self.assertParseAndCheckList(
pp.StringStart() + pp.Word(pp.nums), "123", ["123"]
)
self.assertParseAndCheckList(
pp.StringStart() + pp.Word(pp.nums), " 123", ["123"]
)
self.assertParseAndCheckList(pp.StringStart() + "123", "123", ["123"])
self.assertParseAndCheckList(pp.StringStart() + "123", " 123", ["123"])
self.assertParseAndCheckList(pp.AtStringStart(pp.Word(pp.nums)), "123", ["123"])
self.assertParseAndCheckList(pp.AtStringStart("123"), "123", ["123"])
with self.assertRaisesParseException():
pp.AtStringStart(pp.Word(pp.nums)).parse_string(" 123")
with self.assertRaisesParseException():
pp.AtStringStart("123").parse_string(" 123")
def testStringStartAndLineStartInsideAnd(self):
# fmt: off
P_MTARG = (
pp.StringStart()
+ pp.Word("abcde")
+ pp.StringEnd()
)
P_MTARG2 = (
pp.LineStart()
+ pp.Word("abcde")
+ pp.StringEnd()
)
P_MTARG3 = (
pp.AtLineStart(pp.Word("abcde"))
+ pp.StringEnd()
)
# fmt: on
def test(expr, string):
expr.streamline()
print(expr, repr(string), end=" ")
print(expr.parse_string(string))
test(P_MTARG, "aaa")
test(P_MTARG2, "aaa")
test(P_MTARG2, "\naaa")
test(P_MTARG2, " aaa")
test(P_MTARG2, "\n aaa")
with self.assertRaisesParseException():
test(P_MTARG3, " aaa")
with self.assertRaisesParseException():
test(P_MTARG3, "\n aaa")
def testLineAndStringEnd(self):
NLs = pp.OneOrMore(pp.line_end)
bnf1 = pp.DelimitedList(pp.Word(pp.alphanums).leave_whitespace(), NLs)
bnf2 = pp.Word(pp.alphanums) + pp.string_end
bnf3 = pp.Word(pp.alphanums) + pp.SkipTo(pp.string_end)
tests = [
("testA\ntestB\ntestC\n", ["testA", "testB", "testC"]),
("testD\ntestE\ntestF", ["testD", "testE", "testF"]),
("a", ["a"]),
]
for test, expected in tests:
res1 = bnf1.parse_string(test, parse_all=True)
print(res1, "=?", expected)
self.assertParseResultsEquals(
res1,
expected_list=expected,
msg=f"Failed line_end/string_end test (1): {test!r} -> {res1}",
)
res2 = bnf2.search_string(test)[0]
print(res2, "=?", expected[-1:])
self.assertParseResultsEquals(
res2,
expected_list=expected[-1:],
msg=f"Failed line_end/string_end test (2): {test!r} -> {res2}",
)
res3 = bnf3.parse_string(test, parse_all=True)
first = res3[0]
rest = res3[1]
        # ~ print(res3.dump())
print(repr(rest), "=?", repr(test[len(first) + 1 :]))
self.assertEqual(
rest,
test[len(first) + 1 :],
msg=f"Failed line_end/string_end test (3): {test!r} -> {res3.as_list()}",
)
print()
k = pp.Regex(r"a+", flags=re.S + re.M)
k = k.parse_with_tabs()
k = k.leave_whitespace()
tests = [
(r"aaa", ["aaa"]),
(r"\naaa", None),
(r"a\naa", None),
(r"aaa\n", None),
]
for i, (src, expected) in enumerate(tests):
with self.subTest("", src=src, expected=expected):
print(i, repr(src).replace("\\\\", "\\"), end=" ")
if expected is None:
with self.assertRaisesParseException():
k.parse_string(src, parse_all=True)
else:
res = k.parse_string(src, parse_all=True)
self.assertParseResultsEquals(
res, expected, msg=f"Failed on parse_all=True test {i}"
)
def testVariableParseActionArgs(self):
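        # pyparsing accepts parse actions taking 0, 1, 2, or 3 arguments:
        # (s, l, t), (l, t), (t), or no arguments at all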
pa3 = lambda s, l, t: t
pa2 = lambda l, t: t
pa1 = lambda t: t
pa0 = lambda: None
class Callable3:
def __call__(self, s, l, t):
return t
class Callable2:
def __call__(self, l, t):
return t
class Callable1:
def __call__(self, t):
return t
class Callable0:
def __call__(self):
return
class CallableS3:
@staticmethod
def __call__(s, l, t):
return t
class CallableS2:
@staticmethod
def __call__(l, t):
return t
class CallableS1:
@staticmethod
def __call__(t):
return t
class CallableS0:
@staticmethod
def __call__():
return
class CallableC3:
@classmethod
def __call__(cls, s, l, t):
return t
class CallableC2:
@classmethod
def __call__(cls, l, t):
return t
class CallableC1:
@classmethod
def __call__(cls, t):
return t
class CallableC0:
@classmethod
def __call__(cls):
return
class parseActionHolder:
@staticmethod
def pa3(s, l, t):
return t
@staticmethod
def pa2(l, t):
return t
@staticmethod
def pa1(t):
return t
@staticmethod
def pa0():
return
def paArgs(*args):
print(args)
return args[2]
class ClassAsPA0:
def __init__(self):
pass
def __str__(self):
return "A"
class ClassAsPA1:
def __init__(self, t):
print("making a ClassAsPA1")
self.t = t
def __str__(self):
return self.t[0]
class ClassAsPA2:
def __init__(self, l, t):
self.t = t
def __str__(self):
return self.t[0]
class ClassAsPA3:
def __init__(self, s, l, t):
self.t = t
def __str__(self):
return self.t[0]
class ClassAsPAStarNew(tuple):
def __new__(cls, *args):
print("make a ClassAsPAStarNew", args)
return tuple.__new__(cls, *args[2].as_list())
def __str__(self):
return "".join(self)
A = pp.Literal("A").set_parse_action(pa0)
B = pp.Literal("B").set_parse_action(pa1)
C = pp.Literal("C").set_parse_action(pa2)
D = pp.Literal("D").set_parse_action(pa3)
E = pp.Literal("E").set_parse_action(Callable0())
F = pp.Literal("F").set_parse_action(Callable1())
G = pp.Literal("G").set_parse_action(Callable2())
H = pp.Literal("H").set_parse_action(Callable3())
I = pp.Literal("I").set_parse_action(CallableS0())
J = pp.Literal("J").set_parse_action(CallableS1())
K = pp.Literal("K").set_parse_action(CallableS2())
L = pp.Literal("L").set_parse_action(CallableS3())
M = pp.Literal("M").set_parse_action(CallableC0())
N = pp.Literal("N").set_parse_action(CallableC1())
O = pp.Literal("O").set_parse_action(CallableC2())
P = pp.Literal("P").set_parse_action(CallableC3())
Q = pp.Literal("Q").set_parse_action(paArgs)
R = pp.Literal("R").set_parse_action(parseActionHolder.pa3)
S = pp.Literal("S").set_parse_action(parseActionHolder.pa2)
T = pp.Literal("T").set_parse_action(parseActionHolder.pa1)
U = pp.Literal("U").set_parse_action(parseActionHolder.pa0)
V = pp.Literal("V")
# fmt: off
gg = pp.OneOrMore(
A | B | C | D | E | F | G | H | I | J | K | L | M | N | O | P | Q | R | S | U | V | B | T
)
# fmt: on
testString = "VUTSRQPONMLKJIHGFEDCBA"
res = gg.parse_string(testString, parse_all=True)
print(res)
self.assertParseResultsEquals(
res,
expected_list=list(testString),
msg="Failed to parse using variable length parse actions",
)
A = pp.Literal("A").set_parse_action(ClassAsPA0)
B = pp.Literal("B").set_parse_action(ClassAsPA1)
C = pp.Literal("C").set_parse_action(ClassAsPA2)
D = pp.Literal("D").set_parse_action(ClassAsPA3)
E = pp.Literal("E").set_parse_action(ClassAsPAStarNew)
# fmt: off
gg = pp.OneOrMore(
A | B | C | D | E | F | G | H | I | J | K | L | M | N | O | P | Q | R | S | T | U | V
)
# fmt: on
testString = "VUTSRQPONMLKJIHGFEDCBA"
res = gg.parse_string(testString, parse_all=True)
print(list(map(str, res)))
self.assertEqual(
list(testString),
list(map(str, res)),
"Failed to parse using variable length parse actions "
"using class constructors as parse actions",
)
def testSingleArgException(self):
testMessage = "just one arg"
try:
raise pp.ParseFatalException(testMessage)
except pp.ParseBaseException as pbe:
print("Received expected exception:", pbe)
raisedMsg = pbe.msg
self.assertEqual(
testMessage, raisedMsg, "Failed to get correct exception message"
)
def testOriginalTextFor(self):
def rfn(t):
return f"{t.src}:{len(''.join(t))}"
makeHTMLStartTag = lambda tag: pp.original_text_for(
pp.make_html_tags(tag)[0], as_string=False
)
# use the lambda, Luke
start = makeHTMLStartTag("IMG")
# don't replace our fancy parse action with rfn,
# append rfn to the list of parse actions
start.add_parse_action(rfn)
text = """_<img src="images/cal.png"
alt="cal image" width="16" height="15">_"""
s = start.transform_string(text)
print(s)
self.assertTrue(
s.startswith("_images/cal.png:"), "failed to preserve input s properly"
)
self.assertTrue(
s.endswith("77_"), "failed to return full original text properly"
)
tag_fields = makeHTMLStartTag("IMG").search_string(text)[0]
print(sorted(tag_fields.keys()))
self.assertEqual(
["alt", "empty", "height", "src", "startImg", "tag", "width"],
sorted(tag_fields.keys()),
"failed to preserve results names in original_text_for",
)
def testPackratParsingCacheCopy(self):
integer = pp.Word(pp.nums).set_name("integer")
id = pp.Word(pp.alphas + "_", pp.alphanums + "_")
simpleType = pp.Literal("int")
arrayType = simpleType + ("[" + pp.DelimitedList(integer) + "]")[...]
varType = arrayType | simpleType
varDec = varType + pp.DelimitedList(id + pp.Optional("=" + integer)) + ";"
codeBlock = pp.Literal("{}")
funcDef = (
pp.Optional(varType | "void")
+ id
+ "("
+ (pp.DelimitedList(varType + id) | "void" | pp.empty)
+ ")"
+ codeBlock
)
program = varDec | funcDef
input = "int f(){}"
self.assertParseAndCheckList(
program,
input,
["int", "f", "(", ")", "{}"],
msg="Error in packrat parsing",
verbose=True,
)
def testPackratParsingCacheCopyTest2(self):
DO, AA = list(map(pp.Keyword, "DO AA".split()))
LPAR, RPAR = list(map(pp.Suppress, "()"))
identifier = ~AA + pp.Word("Z")
function_name = identifier.copy()
# ~ function_name = ~AA + Word("Z") #identifier.copy()
expr = pp.Forward().set_name("expr")
expr <<= pp.Group(
function_name + LPAR + pp.Optional(pp.DelimitedList(expr)) + RPAR
).set_name("functionCall") | identifier.set_name(
"ident"
) # .set_debug()#.set_break()
stmt = DO + pp.Group(pp.DelimitedList(identifier + ".*" | expr))
result = stmt.parse_string("DO Z", parse_all=True)
print(result.as_list())
self.assertEqual(
1, len(result[1]), "packrat parsing is duplicating And term exprs"
)
def testParseResultsDel(self):
grammar = pp.OneOrMore(pp.Word(pp.nums))("ints") + pp.OneOrMore(
pp.Word(pp.alphas)
)("words")
res = grammar.parse_string("123 456 ABC DEF", parse_all=True)
print(res.dump())
origInts = res.ints.as_list()
origWords = res.words.as_list()
del res[1]
del res["words"]
print(res.dump())
self.assertEqual("ABC", res[1], "failed to delete 0'th element correctly")
self.assertEqual(
origInts,
res.ints.as_list(),
"updated named attributes, should have updated list only",
)
self.assertEqual("", res.words, "failed to update named attribute correctly")
self.assertEqual(
"DEF", res[-1], "updated list, should have updated named attributes only"
)
def testWithAttributeParseAction(self):
"""
This unit test checks with_attribute in these ways:
* Argument forms as keywords and tuples
* Selecting matching tags by attribute
* Case-insensitive attribute matching
* Correctly matching tags having the attribute, and rejecting tags not having the attribute
(Unit test written by voigts as part of the Google Highly Open Participation Contest)
"""
data = """
<a>1</a>
<a b="x">2</a>
<a B="x">3</a>
<a b="X">4</a>
<a b="y">5</a>
<a class="boo">8</ a>
"""
tagStart, tagEnd = pp.make_html_tags("a")
expr = tagStart + pp.Word(pp.nums)("value") + tagEnd
expected = (
[
["a", ["b", "x"], False, "2", "</a>"],
["a", ["b", "x"], False, "3", "</a>"],
],
[
["a", ["b", "x"], False, "2", "</a>"],
["a", ["b", "x"], False, "3", "</a>"],
],
[["a", ["class", "boo"], False, "8", "</a>"]],
)
for attrib, exp in zip(
[
pp.with_attribute(b="x"),
# with_attribute(B="x"),
pp.with_attribute(("b", "x")),
# with_attribute(("B", "x")),
pp.with_class("boo"),
],
expected,
):
tagStart.set_parse_action(attrib)
result = expr.search_string(data)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=exp,
msg=f"Failed test, expected {expected}, got {result.as_list()}",
)
def testNestedExpressions(self):
"""
This unit test checks nested_expr in these ways:
- use of default arguments
- use of non-default arguments (such as a pyparsing-defined comment
expression in place of quoted_string)
- use of a custom content expression
- use of a pyparsing expression for opener and closer is *OPTIONAL*
- use of input data containing nesting delimiters
- correct grouping of parsed tokens according to nesting of opening
and closing delimiters in the input string
(Unit test written by christoph... as part of the Google Highly Open Participation Contest)
"""
# All defaults. Straight out of the example script. Also, qualifies for
# the bonus: note the fact that (Z | (E^F) & D) is not parsed :-).
# Tests for bug fixed in 1.4.10
print("Test defaults:")
teststring = "((ax + by)*C) (Z | (E^F) & D)"
expr = pp.nested_expr()
expected = [[["ax", "+", "by"], "*C"]]
result = expr.parse_string(teststring, parse_all=False)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=expected,
msg=f"Defaults didn't work. That's a bad sign. Expected: {expected}, got: {result}",
)
# Going through non-defaults, one by one; trying to think of anything
# odd that might not be properly handled.
# Change opener
print("\nNon-default opener")
teststring = "[[ ax + by)*C)"
expected = [[["ax", "+", "by"], "*C"]]
expr = pp.nested_expr("[")
self.assertParseAndCheckList(
expr,
teststring,
expected,
f"Non-default opener didn't work. Expected: {expected}, got: {result}",
verbose=True,
)
# Change closer
print("\nNon-default closer")
teststring = "((ax + by]*C]"
expected = [[["ax", "+", "by"], "*C"]]
expr = pp.nested_expr(closer="]")
self.assertParseAndCheckList(
expr,
teststring,
expected,
f"Non-default closer didn't work. Expected: {expected}, got: {result}",
verbose=True,
)
        # Multicharacter opener and closer
print("\nLiteral expressions for opener and closer")
opener, closer = map(pp.Literal, "bar baz".split())
expr = pp.nested_expr(
opener, closer, content=pp.Regex(r"([^b ]|b(?!a)|ba(?![rz]))+")
)
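        # the content Regex matches any run of characters that cannot begin the
        # multi-character opener "bar" or closer "baz"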
teststring = "barbar ax + bybaz*Cbaz"
expected = [[["ax", "+", "by"], "*C"]]
self.assertParseAndCheckList(
expr,
teststring,
expected,
f"Multicharacter opener and closer didn't work. Expected: {expected}, got: {result}",
verbose=True,
)
# Lisp-ish comments
print("\nUse ignore expression (1)")
comment = pp.Regex(r";;.*")
teststring = """
(let ((greeting "Hello, world!")) ;;(foo bar
(display greeting))
"""
expected = [
[
"let",
[["greeting", '"Hello,', 'world!"']],
";;(foo bar",
["display", "greeting"],
]
]
expr = pp.nested_expr(ignore_expr=comment)
self.assertParseAndCheckList(
expr,
teststring,
expected,
f'Lisp-ish comments (";; <...> $") didn\'t work. Expected: {expected}, got: {result}',
verbose=True,
)
# Lisp-ish comments, using a standard bit of pyparsing, and an Or.
print("\nUse ignore expression (2)")
comment = ";;" + pp.rest_of_line
teststring = """
(let ((greeting "Hello, )world!")) ;;(foo bar
(display greeting))
"""
expected = [
[
"let",
[["greeting", '"Hello, )world!"']],
";;",
"(foo bar",
["display", "greeting"],
]
]
expr = pp.nested_expr(ignore_expr=(comment ^ pp.quoted_string))
self.assertParseAndCheckList(
expr,
teststring,
expected,
f'Lisp-ish comments (";; <...> $") and quoted strings didn\'t work. Expected: {expected}, got: {result}',
verbose=True,
)
def testNestedExpressions2(self):
"""test nested_expr with conditions that explore other paths
identical opener and closer
opener and/or closer of type other than string or iterable
multi-character opener and/or closer
single character opener and closer with ignore_expr=None
multi-character opener and/or closer with ignore_expr=None
"""
name = pp.Word(pp.alphanums + "_")
# identical opener and closer
with self.assertRaises(
ValueError, msg="matching opener and closer should raise error"
):
expr = name + pp.nested_expr(opener="{", closer="{")
# opener and/or closer of type other than string or iterable
with self.assertRaises(
ValueError, msg="opener and closer as ints should raise error"
):
expr = name + pp.nested_expr(opener=12, closer=18)
# multi-character opener and/or closer
tstMulti = "aName {{ outer {{ 'inner with opener {{ and closer }} in quoted string' }} }}"
expr = name + pp.nested_expr(opener="{{", closer="}}")
result = expr.parse_string(tstMulti, parse_all=True)
expected = [
"aName",
["outer", ["'inner with opener {{ and closer }} in quoted string'"]],
]
print(result.dump())
self.assertParseResultsEquals(
result, expected, msg="issue with multi-character opener and closer"
)
# single character opener and closer with ignore_expr=None
tst = "aName { outer { 'inner with opener { and closer } in quoted string' }}"
expr = name + pp.nested_expr(opener="{", closer="}", ignore_expr=None)
singleCharResult = expr.parse_string(tst, parse_all=True)
print(singleCharResult.dump())
# multi-character opener and/or closer with ignore_expr=None
expr = name + pp.nested_expr(opener="{{", closer="}}", ignore_expr=None)
multiCharResult = expr.parse_string(tstMulti, parse_all=True)
print(multiCharResult.dump())
self.assertParseResultsEquals(
singleCharResult,
multiCharResult.as_list(),
msg="using different openers and closers shouldn't affect resulting ParseResults",
)
def testNestedExpressions3(self):
prior_ws_chars = pp.ParserElement.DEFAULT_WHITE_CHARS
with ppt.reset_pyparsing_context():
pp.ParserElement.set_default_whitespace_chars('')
input_str = dedent(
"""\
selector
{
a:b;
c:d;
selector
{
a:b;
c:d;
}
y:z;
}"""
)
print(ppt.with_line_numbers(input_str, 1, 100))
nested_result = pp.nested_expr('{', '}').parse_string("{" + input_str + "}").as_list()
expected_result = [
[
'selector\n',
[
'\n a:b;\n c:d;\n selector\n ',
[
'\n a:b;\n c:d;\n '
],
'\n y:z;\n'
]
]
]
self.assertEqual(nested_result, expected_result)
# make sure things have been put back properly
self.assertEqual(pp.ParserElement.DEFAULT_WHITE_CHARS, prior_ws_chars)
def testNestedExpressions4(self):
allowed = pp.alphas
plot_options_short = pp.nested_expr('[',
']',
content=pp.OneOrMore(pp.Word(allowed) ^ pp.quoted_string)
).set_results_name('plot_options')
self.assertParseAndCheckList(
plot_options_short,
"[slkjdfl sldjf [lsdf'lsdf']]",
[['slkjdfl', 'sldjf', ['lsdf', "'lsdf'"]]]
)
def testNestedExpressionDoesNotOverwriteParseActions(self):
content = pp.Word(pp.nums + " ")
content.add_parse_action(lambda t: None)
orig_pa = content.parseAction[0]
expr = pp.nested_expr(content=content)
assert content.parseAction[0] is orig_pa
def testNestedExpressionRandom(self):
import random
word_chars = pp.alphanums
def get_random_character(_charset=word_chars + " "):
return random.choice(_charset)
def create_random_quoted_string():
quote_char = random.choice(('"', "'"))
yield quote_char
yield from (get_random_character() for _ in range(random.randint(0, 12)))
yield quote_char
def create_random_nested_expression():
yield "["
if random.random() < 0.25:
yield from create_random_quoted_string()
for _ in range(random.randint(0, 16)):
rnd = random.random()
if rnd < 0.25:
yield from create_random_quoted_string()
elif rnd < 0.3:
yield from create_random_nested_expression()
else:
yield from (get_random_character() for _ in range(random.randint(1, 4)))
if random.random() < 0.25:
yield from create_random_quoted_string()
yield "]"
num_reps=150
# simulate nested_expr
LBRACK, RBRACK = pp.Suppress.using_each("[]")
wd = pp.Word(word_chars)
qs = pp.quoted_string()
ls = pp.Forward()
ls <<= pp.Group(LBRACK + (qs | ls | wd)[...] + RBRACK)
def crack_nested_string(s) -> list:
return ls.parse_string(s, parse_all=True).as_list()
expr = pp.nested_expr('[', ']')
for _ in range(num_reps):
nested_str = ''.join(create_random_nested_expression())
# print(nested_str)
cracked_result = crack_nested_string(nested_str)
self.assertParseAndCheckList(
expr,
nested_str,
cracked_result,
f"Failed: {nested_str}, expected {cracked_result}",
                verbose=False,
)
# test multi-character nesting delimiters
expr = pp.nested_expr('<<', '>>')
for _ in range(num_reps):
nested_str = ''.join(create_random_nested_expression())
# print(nested_str)
cracked_result = crack_nested_string(nested_str)
nested_str = nested_str.replace("[", "<<").replace("]", ">>")
self.assertParseAndCheckList(
expr,
nested_str,
cracked_result,
f"Failed: {nested_str}, expected {cracked_result}",
                verbose=False,
)
# test with no ignore_expr (no quoted string handling)
expr = pp.nested_expr('[', ']', ignore_expr=None)
for _ in range(num_reps):
nested_str = ''.join(create_random_nested_expression())
nested_str = nested_str.replace('"', "").replace("'", "")
# print(nested_str)
cracked_result = crack_nested_string(nested_str)
self.assertParseAndCheckList(
expr,
nested_str,
cracked_result,
f"Failed: {nested_str}, expected {cracked_result}",
                verbose=False,
)
# test multi-character nesting delimiters, with no ignore_expr
expr = pp.nested_expr('<<', '>>', ignore_expr=None)
for _ in range(num_reps):
nested_str = ''.join(create_random_nested_expression())
nested_str = nested_str.replace('"', "").replace("'", "")
# print(nested_str)
cracked_result = crack_nested_string(nested_str)
nested_str = nested_str.replace("[", "<<").replace("]", ">>")
self.assertParseAndCheckList(
expr,
nested_str,
cracked_result,
f"Failed: {nested_str}, expected {cracked_result}",
verbose=False,
)
def testWordMinMaxArgs(self):
parsers = [
"A" + pp.Word(pp.nums),
"A" + pp.Word(pp.nums, min=1),
"A" + pp.Word(pp.nums, max=6),
"A" + pp.Word(pp.nums, min=1, max=6),
"A" + pp.Word(pp.nums, min=1),
"A" + pp.Word(pp.nums, min=2),
"A" + pp.Word(pp.nums, min=2, max=6),
pp.Word("A", pp.nums),
pp.Word("A", pp.nums, min=1),
pp.Word("A", pp.nums, max=6),
pp.Word("A", pp.nums, min=1, max=6),
pp.Word("A", pp.nums, min=1),
pp.Word("A", pp.nums, min=2),
pp.Word("A", pp.nums, min=2, max=6),
pp.Word(pp.alphas, pp.nums),
pp.Word(pp.alphas, pp.nums, min=1),
pp.Word(pp.alphas, pp.nums, max=6),
pp.Word(pp.alphas, pp.nums, min=1, max=6),
pp.Word(pp.alphas, pp.nums, min=1),
pp.Word(pp.alphas, pp.nums, min=2),
pp.Word(pp.alphas, pp.nums, min=2, max=6),
]
fails = []
for p in parsers:
print(p, getattr(p, "reString", "..."), end=" ", flush=True)
try:
p.parse_string("A123", parse_all=True)
except Exception as e:
print(" <<< FAIL")
fails.append(p)
else:
print()
if fails:
self.fail(f"{','.join(str(f) for f in fails)} failed to match")
def testWordMinMaxExactArgs(self):
for minarg in range(1, 9):
for maxarg in range(minarg, 10):
with self.subTest(minarg=minarg, maxarg=maxarg):
expr = pp.Word("AB", pp.nums, min=minarg, max=maxarg)
print(minarg, maxarg, expr.reString, end=" ")
trailing = expr.reString.rpartition("]")[-1]
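                    # Word compiles to an initial-char class plus a body-char class, so the
                    # regex quantifier on the body is one less than the requested min/max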
expected_special = {
(1, 1): "",
(1, 2): "?",
(2, 2): "",
}
expected_default = (
f"{{{minarg - 1}}}"
if minarg == maxarg
else f"{{{minarg - 1},{maxarg - 1}}}"
)
expected = expected_special.get((minarg, maxarg), expected_default)
print(trailing == expected)
self.assertEqual(trailing, expected)
self.assertParseAndCheckList(
expr + pp.rest_of_line.suppress(),
"A1234567890",
["A1234567890"[:maxarg]],
)
for exarg in range(1, 9):
with self.subTest(exarg=exarg):
expr = pp.Word("AB", pp.nums, exact=exarg)
print(exarg, expr.reString, end=" ")
trailing = expr.reString.rpartition("]")[-1]
if exarg < 3:
expected = ""
else:
expected = f"{{{exarg - 1}}}"
print(trailing == expected)
self.assertEqual(trailing, expected)
self.assertParseAndCheckList(
expr + pp.rest_of_line.suppress(),
"A1234567890",
["A1234567890"[:exarg]],
)
def testWordMin(self):
# failing tests
for min_val in range(3, 5):
with self.subTest(min_val=min_val):
wd = pp.Word("a", "1", min=min_val)
print(min_val, wd.reString)
with self.assertRaisesParseException():
wd.parse_string("a1")
for min_val in range(2, 5):
with self.subTest(min_val=min_val):
wd = pp.Word("a", min=min_val)
print(min_val, wd.reString)
with self.assertRaisesParseException():
wd.parse_string("a")
# passing tests
for min_val in range(2, 5):
with self.subTest(min_val=min_val):
wd = pp.Word("a", min=min_val)
test_string = "a" * min_val
self.assertParseAndCheckList(
wd,
test_string,
[test_string],
msg=f"Word(min={min_val}) failed",
verbose=True,
)
for min_val in range(2, 5):
with self.subTest(min_val=min_val):
wd = pp.Word("a", "1", min=min_val)
test_string = "a" + "1" * (min_val - 1)
self.assertParseAndCheckList(
wd,
test_string,
[test_string],
msg=f"Word(min={min_val}) failed",
verbose=True,
)
def testWordExact(self):
# failing tests
for exact_val in range(2, 5):
with self.subTest(exact_val=exact_val):
wd = pp.Word("a", exact=exact_val)
print(exact_val, wd.reString)
with self.assertRaisesParseException():
wd.parse_string("a")
# passing tests
for exact_val in range(2, 5):
with self.subTest(exact_val=exact_val):
wd = pp.Word("a", exact=exact_val)
test_string = "a" * exact_val
self.assertParseAndCheckList(
wd,
test_string,
[test_string],
msg=f"Word(exact={exact_val}) failed",
verbose=True,
)
def testInvalidMinMaxArgs(self):
with self.assertRaises(ValueError):
wd = pp.Word(min=2, max=1)
def testWordExclude(self):
allButPunc = pp.Word(pp.printables, exclude_chars=".,:;-_!?")
test = "Hello, Mr. Ed, it's Wilbur!"
result = allButPunc.search_string(test).as_list()
print(result)
self.assertEqual(
[["Hello"], ["Mr"], ["Ed"], ["it's"], ["Wilbur"]],
result,
"failed WordExcludeTest",
)
def testWordExclude2(self):
punc_chars = ".,:;-_!?"
all_but_punc = pp.Word(pp.printables, exclude_chars=punc_chars)
all_and_punc = pp.Word(pp.printables)
assert set(punc_chars) & set(all_but_punc.init_chars) == set()
expr = all_but_punc("no_punc*") | all_and_punc("with_punc*")
self.assertParseAndCheckDict(
expr[...],
"Mr. Ed,",
{"no_punc": ["Mr", "Ed"], "with_punc": [".", ","]},
"failed matching with exclude_chars (1)",
)
self.assertParseAndCheckDict(
expr[...],
":Mr. Ed,",
{"no_punc": ["Ed"], "with_punc": [":Mr.", ","]},
"failed matching with exclude_chars (2)",
)
def testWordMinOfZero(self):
"""test a Word with min=0"""
with self.assertRaises(ValueError, msg="expected min 0 to error"):
expr = pp.Word(pp.nums, min=0, max=10)
@staticmethod
def setup_testWordMaxGreaterThanZeroAndAsKeyword():
# fmt: off
bool_operand = (
pp.Word(pp.alphas, max=1, as_keyword=True)
| pp.one_of("True False")
)
test_string = "p q r False"
        # fmt: on
        return SimpleNamespace(**locals())
def testWordMaxGreaterThanZeroAndAsKeyword1(self):
"""test a Word with max>0 and as_keyword=True"""
setup = self.setup_testWordMaxGreaterThanZeroAndAsKeyword()
result = setup.bool_operand[...].parse_string(setup.test_string, parse_all=True)
self.assertParseAndCheckList(
setup.bool_operand[...],
setup.test_string,
setup.test_string.split(),
msg=f"{__()}Failed to parse Word(max=1, as_keyword=True)",
verbose=True,
)
def testWordMaxGreaterThanZeroAndAsKeyword2(self):
"""test a Word with max>0 and as_keyword=True"""
setup = self.setup_testWordMaxGreaterThanZeroAndAsKeyword()
with self.assertRaisesParseException(
msg=f"{__()}Failed to detect Word with max > 0 and as_keyword=True"
):
setup.bool_operand.parse_string("abc", parse_all=True)
def testCharAsKeyword(self):
"""test a Char with as_keyword=True"""
grade = pp.OneOrMore(pp.Char("ABCDF", as_keyword=True))
# all single char words
result = grade.parse_string("B B C A D", parse_all=True)
print(result)
expected = ["B", "B", "C", "A", "D"]
self.assertParseResultsEquals(
result, expected, msg="issue with Char as_keyword=True"
)
# NOT all single char words
test2 = "B BB C A D"
result2 = grade.parse_string(test2, parse_all=False)
print(result2)
expected2 = ["B"]
self.assertParseResultsEquals(
result2, expected2, msg="issue with Char as_keyword=True parsing 2 chars"
)
def testCharRe(self):
expr = pp.Char("ABCDEFG")
self.assertEqual("[A-G]", expr.reString)
def testCharsNotIn(self):
"""test CharsNotIn initialized with various arguments"""
vowels = "AEIOU"
tst = "bcdfghjklmnpqrstvwxyz"
# default args
consonants = pp.CharsNotIn(vowels)
result = consonants.parse_string(tst, parse_all=True)
print(result)
self.assertParseResultsEquals(
result, [tst], msg="issue with CharsNotIn w/ default args"
)
# min = 0
with self.assertRaises(ValueError, msg="issue with CharsNotIn w/ min=0"):
consonants = pp.CharsNotIn(vowels, min=0)
# max > 0
consonants = pp.CharsNotIn(vowels, max=5)
result = consonants.parse_string(tst, parse_all=False)
print(result)
self.assertParseResultsEquals(
result, [tst[:5]], msg="issue with CharsNotIn w max > 0"
)
# exact > 0
consonants = pp.CharsNotIn(vowels, exact=10)
result = consonants.parse_string(tst[:10], parse_all=True)
print(result)
self.assertParseResultsEquals(
result, [tst[:10]], msg="issue with CharsNotIn w/ exact > 0"
)
# min > length
consonants = pp.CharsNotIn(vowels, min=25)
with self.assertRaisesParseException(msg="issue with CharsNotIn min > tokens"):
result = consonants.parse_string(tst, parse_all=True)
def testParseAll(self):
testExpr = pp.Word("A")
tests = [
("AAAAA", False, True),
("AAAAA", True, True),
("AAABB", False, True),
("AAABB", True, False),
]
for s, parse_allFlag, shouldSucceed in tests:
try:
print(f"'{s}' parse_all={parse_allFlag} (shouldSucceed={shouldSucceed})")
testExpr.parse_string(s, parse_all=parse_allFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
# add test for trailing comments
testExpr.ignore(pp.cpp_style_comment)
tests = [
("AAAAA //blah", False, True),
("AAAAA //blah", True, True),
("AAABB //blah", False, True),
("AAABB //blah", True, False),
]
for s, parse_allFlag, shouldSucceed in tests:
try:
print(f"'{s}' parse_all={parse_allFlag} (shouldSucceed={shouldSucceed})")
testExpr.parse_string(s, parse_all=parse_allFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
# add test with very long expression string
# testExpr = pp.MatchFirst([pp.Literal(c) for c in pp.printables if c != 'B'])[1, ...]
anything_but_an_f = pp.OneOrMore(
pp.MatchFirst([pp.Literal(c) for c in pp.printables if c != "f"])
)
testExpr = pp.Word("012") + anything_but_an_f
tests = [
("00aab", False, True),
("00aab", True, True),
("00aaf", False, True),
("00aaf", True, False),
]
for s, parse_allFlag, shouldSucceed in tests:
try:
print(f"'{s}' parse_all={parse_allFlag} (shouldSucceed={shouldSucceed})")
testExpr.parse_string(s, parse_all=parse_allFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
def testGreedyQuotedStrings(self):
src = """\
"string1", "strin""g2"
'string1', 'string2'
^string1^, ^string2^
<string1>, <string2>"""
testExprs = (
pp.sgl_quoted_string,
pp.dbl_quoted_string,
pp.quoted_string,
pp.QuotedString('"', esc_quote='""'),
pp.QuotedString("'", esc_quote="''"),
pp.QuotedString("^"),
pp.QuotedString("<", end_quote_char=">"),
)
for expr in testExprs:
strs = pp.DelimitedList(expr).search_string(src)
print(strs)
self.assertTrue(
bool(strs), f"no matches found for test expression '{expr}'"
)
for lst in strs:
self.assertEqual(
2, len(lst), f"invalid match found for test expression '{expr}'"
)
src = """'ms1',1,0,'2009-12-22','2009-12-22 10:41:22') ON DUPLICATE KEY UPDATE sent_count = sent_count + 1, mtime = '2009-12-22 10:41:22';"""
tok_sql_quoted_value = pp.QuotedString(
"'", "\\", "''", True, False
) ^ pp.QuotedString('"', "\\", '""', True, False)
tok_sql_computed_value = pp.Word(pp.nums)
tok_sql_identifier = pp.Word(pp.alphas)
val = tok_sql_quoted_value | tok_sql_computed_value | tok_sql_identifier
vals = pp.DelimitedList(val)
print(vals.parse_string(src, parse_all=False))
self.assertEqual(
5,
len(vals.parse_string(src, parse_all=False)),
"error in greedy quote escaping",
)
def testQuotedStringEscapedQuotes(self):
quoted = pp.QuotedString('"', esc_quote='""')
res = quoted.parse_string('"like ""SQL"""', parse_all=True)
print(res.as_list())
self.assertEqual(['like "SQL"'], res.as_list())
# Issue #263 - handle case when the esc_quote is not a repeated character
quoted = pp.QuotedString("y", esc_char=None, esc_quote="xy")
res = quoted.parse_string("yaaay", parse_all=True)
self.assertEqual(["aaa"], res.as_list())
res = quoted.parse_string("yaaaxyaaay", parse_all=True)
print(res.as_list())
self.assertEqual(["aaayaaa"], res.as_list())
def testQuotedStringEscapedExtendedChars(self):
quoted = pp.QuotedString("'")
self.assertParseAndCheckList(
quoted,
"'null: \0 octal: \267 hex: \xb7 unicode: \u00b7'",
['null: \x00 octal: · hex: · unicode: ·'],
"failed to parse embedded numeric escapes",
)
def testWordBoundaryExpressions(self):
ws = pp.WordStart()
we = pp.WordEnd()
vowel = pp.one_of(list("AEIOUY"))
consonant = pp.one_of(list("BCDFGHJKLMNPQRSTVWXZ"))
leadingVowel = ws + vowel
trailingVowel = vowel + we
leadingConsonant = ws + consonant
trailingConsonant = consonant + we
internalVowel = ~ws + vowel + ~we
bnf = leadingVowel | trailingVowel
tests = """\
ABC DEF GHI
JKL MNO PQR
STU VWX YZ """.splitlines()
tests.append("\n".join(tests))
expectedResult = [
[["D", "G"], ["A"], ["C", "F"], ["I"], ["E"], ["A", "I"]],
[["J", "M", "P"], [], ["L", "R"], ["O"], [], ["O"]],
[["S", "V"], ["Y"], ["X", "Z"], ["U"], [], ["U", "Y"]],
[
["D", "G", "J", "M", "P", "S", "V"],
["A", "Y"],
["C", "F", "L", "R", "X", "Z"],
["I", "O", "U"],
["E"],
["A", "I", "O", "U", "Y"],
],
]
for t, expected in zip(tests, expectedResult):
print(t)
results = [
flatten(e.search_string(t).as_list())
for e in [
leadingConsonant,
leadingVowel,
trailingConsonant,
trailingVowel,
internalVowel,
bnf,
]
]
print(results)
print()
self.assertEqual(
expected,
results,
f"Failed WordBoundaryTest, expected {expected}, got {results}",
)
def testWordBoundaryExpressions2(self):
from itertools import product
ws1 = pp.WordStart(pp.alphas)
ws2 = pp.WordStart(word_chars=pp.alphas)
ws3 = pp.WordStart(word_chars=pp.alphas)
we1 = pp.WordEnd(pp.alphas)
we2 = pp.WordEnd(word_chars=pp.alphas)
we3 = pp.WordEnd(word_chars=pp.alphas)
for i, (ws, we) in enumerate(product((ws1, ws2, ws3), (we1, we2, we3))):
try:
expr = "(" + ws + pp.Word(pp.alphas) + we + ")"
expr.parse_string("(abc)", parse_all=True)
except pp.ParseException as pe:
self.fail(f"Test {i} failed: {pe}")
else:
pass
def testRequiredEach(self):
parser = pp.Keyword("bam") & pp.Keyword("boo")
try:
res1 = parser.parse_string("bam boo", parse_all=True)
print(res1.as_list())
res2 = parser.parse_string("boo bam", parse_all=True)
print(res2.as_list())
except ParseException:
failed = True
else:
failed = False
self.assertFalse(failed, "invalid logic in Each")
self.assertEqual(
set(res1),
set(res2),
f"Failed RequiredEachTest, expected {res1.as_list()}"
f" and {res2.as_list} to contain the same words in any order",
)
def testOptionalEachTest1(self):
for the_input in [
"Tal Weiss Major",
"Tal Major",
"Weiss Major",
"Major",
"Major Tal",
"Major Weiss",
"Major Tal Weiss",
]:
print(the_input)
parser1 = (pp.Optional("Tal") + pp.Optional("Weiss")) & pp.Keyword("Major")
parser2 = pp.Optional(
pp.Optional("Tal") + pp.Optional("Weiss")
) & pp.Keyword("Major")
parser3 = (pp.Keyword("Tal") | pp.Keyword("Weiss"))[...] & pp.Keyword("Major")
p1res = parser1.parse_string(the_input, parse_all=True)
p2res = parser2.parse_string(the_input, parse_all=True)
self.assertEqual(
p1res.as_list(),
p2res.as_list(),
f"Each failed to match with nested Optionals, {p1res.as_list()} should match {p2res.as_list()}",
)
p3res = parser3.parse_string(the_input, parse_all=True)
self.assertEqual(
p1res.as_list(),
p3res.as_list(),
f"Each failed to match with repeated Optionals, {p1res.as_list()} should match {p3res.as_list()}",
)
def testOptionalEachTest2(self):
word = pp.Word(pp.alphanums + "_").set_name("word")
with_stmt = "with" + pp.OneOrMore(pp.Group(word("key") + "=" + word("value")))(
"overrides"
)
using_stmt = "using" + pp.Regex("id-[0-9a-f]{8}")("id")
modifiers = pp.Optional(with_stmt("with_stmt")) & pp.Optional(
using_stmt("using_stmt")
)
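        # comparing a ParserElement to a str invokes ParserElement.__eq__, which
        # reports whether the expression matches the given string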
self.assertEqual("with foo=bar bing=baz using id-deadbeef", modifiers)
self.assertNotEqual(
"with foo=bar bing=baz using id-deadbeef using id-feedfeed", modifiers
)
def testOptionalEachTest3(self):
foo = pp.Literal("foo")
bar = pp.Literal("bar")
openBrace = pp.Suppress(pp.Literal("{"))
closeBrace = pp.Suppress(pp.Literal("}"))
exp = openBrace + (foo[1, ...]("foo") & bar[...]("bar")) + closeBrace
tests = """\
{foo}
{bar foo bar foo bar foo}
""".splitlines()
for test in tests:
test = test.strip()
if not test:
continue
self.assertParseAndCheckList(
exp,
test,
test.strip("{}").split(),
f"failed to parse Each expression {test!r}",
verbose=True,
)
with self.assertRaisesParseException():
exp.parse_string("{bar}", parse_all=True)
def testOptionalEachTest4(self):
expr = (~ppc.iso8601_date + ppc.integer("id")) & (
pp.Group(ppc.iso8601_date)("date*")[...]
)
success, _ = expr.run_tests(
"""
1999-12-31 100 2001-01-01
42
"""
)
self.assertTrue(success)
def testEachWithParseFatalException(self):
option_expr = pp.Keyword("options") - "(" + ppc.integer + ")"
step_expr1 = pp.Keyword("step") - "(" + ppc.integer + ")"
step_expr2 = pp.Keyword("step") - "(" + ppc.integer + "Z" + ")"
step_expr = step_expr1 ^ step_expr2
parser = option_expr & step_expr[...]
tests = [
(
"options(100) step(A)",
"Expected integer, found 'A' (at char 18), (line:1, col:19)",
),
(
"step(A) options(100)",
"Expected integer, found 'A' (at char 5), (line:1, col:6)",
),
(
"options(100) step(100A)",
"""Expected 'Z', found 'A' (at char 21), (line:1, col:22)""",
),
(
"options(100) step(22) step(100ZA)",
"""Expected ')', found 'A' (at char 31), (line:1, col:32)""",
),
]
test_lookup = dict(tests)
success, output = parser.run_tests((t[0] for t in tests), failure_tests=True)
for test_str, result in output:
self.assertEqual(
test_lookup[test_str],
str(result),
f"incorrect exception raised for test string {test_str!r}",
)
def testEachWithMultipleMatch(self):
size = "size" + pp.one_of("S M L XL")
color = pp.Group(
"color" + pp.one_of("red orange yellow green blue purple white black brown")
)
size.set_name("size_spec")
color.set_name("color_spec")
spec0 = size("size") & color[...]("colors")
spec1 = size("size") & color[1, ...]("colors")
for spec in (spec0, spec1):
for test, expected_dict in [
(
"size M color red color yellow",
{
"colors": [["color", "red"], ["color", "yellow"]],
"size": ["size", "M"],
},
),
(
"color green size M color red color yellow",
{
"colors": [
["color", "green"],
["color", "red"],
["color", "yellow"],
],
"size": ["size", "M"],
},
),
]:
result = spec.parse_string(test, parse_all=True)
self.assertParseResultsEquals(result, expected_dict=expected_dict)
def testSumParseResults(self):
samplestr1 = "garbage;DOB 10-10-2010;more garbage\nID PARI12345678;more garbage"
samplestr2 = "garbage;ID PARI12345678;more garbage\nDOB 10-10-2010;more garbage"
samplestr3 = "garbage;DOB 10-10-2010"
samplestr4 = "garbage;ID PARI12345678;more garbage- I am cool"
res1 = "ID:PARI12345678 DOB:10-10-2010 INFO:"
res2 = "ID:PARI12345678 DOB:10-10-2010 INFO:"
res3 = "ID: DOB:10-10-2010 INFO:"
res4 = "ID:PARI12345678 DOB: INFO: I am cool"
dob_ref = "DOB" + pp.Regex(r"\d{2}-\d{2}-\d{4}")("dob")
id_ref = "ID" + pp.Word(pp.alphanums, exact=12)("id")
info_ref = "-" + pp.rest_of_line("info")
person_data = dob_ref | id_ref | info_ref
tests = (samplestr1, samplestr2, samplestr3, samplestr4)
results = (res1, res2, res3, res4)
for test, expected in zip(tests, results):
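            # sum() merges the scattered one-field matches into a single ParseResults
            # carrying all of the named fields found in this test string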
person = sum(person_data.search_string(test))
result = f"ID:{person.id} DOB:{person.dob} INFO:{person.info}"
print(test)
print(expected)
print(result)
for pd in person_data.search_string(test):
print(pd.dump())
print()
self.assertEqual(
expected,
result,
f"Failed to parse '{test}' correctly, \nexpected '{expected}', got '{result}'",
)
def testMarkInputLine(self):
samplestr1 = "DOB 100-10-2010;more garbage\nID PARI12345678;more garbage"
dob_ref = "DOB" + pp.Regex(r"\d{2}-\d{2}-\d{4}")("dob")
try:
res = dob_ref.parse_string(samplestr1, parse_all=True)
except ParseException as pe:
outstr = pe.mark_input_line()
print(outstr)
self.assertEqual(
"DOB >!<100-10-2010;more garbage",
outstr,
"did not properly create marked input line",
)
else:
self.fail("test construction failed - should have raised an exception")
def testLocatedExpr(self):
# 012345678901234567890123456789012345678901234567890
samplestr1 = "DOB 10-10-2010;more garbage;ID PARI12345678 ;more garbage"
with self.assertWarns(DeprecationWarning):
id_ref = pp.locatedExpr("ID" + pp.Word(pp.alphanums, exact=12)("id"))
res = id_ref.search_string(samplestr1)[0][0]
print(res.dump())
self.assertEqual(
"ID PARI12345678",
samplestr1[res.locn_start : res.locn_end],
"incorrect location calculation",
)
def testLocatedExprUsingLocated(self):
# 012345678901234567890123456789012345678901234567890
samplestr1 = "DOB 10-10-2010;more garbage;ID PARI12345678 ;more garbage"
id_ref = pp.Located("ID" + pp.Word(pp.alphanums, exact=12)("id"))
res = id_ref.search_string(samplestr1)[0]
print(res.dump())
self.assertEqual(
"ID PARI12345678",
samplestr1[res.locn_start : res.locn_end],
"incorrect location calculation",
)
self.assertParseResultsEquals(
res,
[28, ["ID", "PARI12345678"], 43],
{"locn_end": 43, "locn_start": 28, "value": {"id": "PARI12345678"}},
)
self.assertEqual("PARI12345678", res.value.id)
# if Located has a results name, handle appropriately
id_ref = pp.Located("ID" + pp.Word(pp.alphanums, exact=12)("id"))("loc")
res = id_ref.search_string(samplestr1)[0]
print(res.dump())
self.assertEqual(
"ID PARI12345678",
samplestr1[res.loc.locn_start : res.loc.locn_end],
"incorrect location calculation",
)
self.assertParseResultsEquals(
res.loc,
[28, ["ID", "PARI12345678"], 43],
{"locn_end": 43, "locn_start": 28, "value": {"id": "PARI12345678"}},
)
self.assertEqual("PARI12345678", res.loc.value.id)
wd = pp.Word(pp.alphas)
test_string = "ljsdf123lksdjjf123lkkjj1222"
pp_matches = pp.Located(wd).search_string(test_string)
re_matches = find_all_re_matches("[a-z]+", test_string)
for pp_match, re_match in zip(pp_matches, re_matches):
self.assertParseResultsEquals(
pp_match, [re_match.start(), [re_match.group(0)], re_match.end()]
)
print(pp_match)
print(re_match)
print(pp_match.value)
def testPop(self):
source = "AAA 123 456 789 234"
patt = pp.Word(pp.alphas)("name") + pp.Word(pp.nums) * (1,)
result = patt.parse_string(source, parse_all=True)
tests = [
(0, "AAA", ["123", "456", "789", "234"]),
(None, "234", ["123", "456", "789"]),
("name", "AAA", ["123", "456", "789"]),
(-1, "789", ["123", "456"]),
]
for test in tests:
idx, val, remaining = test
if idx is not None:
ret = result.pop(idx)
else:
ret = result.pop()
print("EXP:", val, remaining)
print("GOT:", ret, result.as_list())
print(ret, result.as_list())
self.assertEqual(
val,
ret,
f"wrong value returned, got {ret!r}, expected {val!r}",
)
self.assertEqual(
remaining,
result.as_list(),
f"list is in wrong state after pop, got {result.as_list()!r}, expected {remaining!r}",
)
print()
prevlist = result.as_list()
ret = result.pop("name", default="noname")
print(ret)
print(result.as_list())
self.assertEqual(
"noname",
ret,
f"default value not successfully returned, got {ret!r}, expected {'noname'!r}",
)
self.assertEqual(
prevlist,
result.as_list(),
f"list is in wrong state after pop, got {result.as_list()!r}, expected {remaining!r}",
)
def testPopKwargsErr(self):
"""raise a TypeError in pop by adding invalid named args"""
source = "AAA 123 456 789 234"
patt = pp.Word(pp.alphas)("name") + pp.Word(pp.nums) * (1,)
result = patt.parse_string(source, parse_all=True)
print(result.dump())
with self.assertRaises(TypeError):
result.pop(notDefault="foo")
def testAddCondition(self):
numParser = pp.Word(pp.nums)
numParser.add_parse_action(lambda s, l, t: int(t[0]))
numParser.add_condition(lambda s, l, t: t[0] % 2)
numParser.add_condition(lambda s, l, t: t[0] >= 7)
result = numParser.search_string("1 2 3 4 5 6 7 8 9 10")
print(result.as_list())
self.assertEqual(
[[7], [9]], result.as_list(), "failed to properly process conditions"
)
numParser = pp.Word(pp.nums)
numParser.add_parse_action(lambda s, l, t: int(t[0]))
rangeParser = numParser("from_") + pp.Suppress("-") + numParser("to")
result = rangeParser.search_string("1-4 2-4 4-3 5 6 7 8 9 10")
print(result.as_list())
self.assertEqual(
[[1, 4], [2, 4], [4, 3]],
result.as_list(),
"failed to properly process conditions",
)
rangeParser.add_condition(
lambda t: t.to > t.from_, message="from must be <= to", fatal=False
)
result = rangeParser.search_string("1-4 2-4 4-3 5 6 7 8 9 10")
print(result.as_list())
self.assertEqual(
[[1, 4], [2, 4]], result.as_list(), "failed to properly process conditions"
)
rangeParser = numParser("from_") + pp.Suppress("-") + numParser("to")
rangeParser.add_condition(
lambda t: t.to > t.from_, message="from must be <= to", fatal=True
)
try:
result = rangeParser.search_string("1-4 2-4 4-3 5 6 7 8 9 10")
self.fail("failed to interrupt parsing on fatal condition failure")
except ParseFatalException:
print("detected fatal condition")
def testPatientOr(self):
        # Two expressions and an input string which could - syntactically - be matched against
# both expressions. The "Literal" expression is considered invalid though, so this PE
# should always detect the "Word" expression.
def validate(token):
if token[0] == "def":
raise pp.ParseException("signalling invalid token")
return token
a = pp.Word("de").set_name("Word") # .set_debug()
b = pp.Literal("def").set_name("Literal").set_parse_action(validate) # .set_debug()
c = pp.Literal("d").set_name("d") # .set_debug()
# The "Literal" expressions's ParseAction is not executed directly after syntactically
# detecting the "Literal" Expression but only after the Or-decision has been made
# (which is too late)...
try:
result = (a ^ b ^ c).parse_string("def", parse_all=False)
print(result)
self.assertEqual(
["de"],
result.as_list(),
f"failed to select longest match, chose {result}",
)
except ParseException:
failed = True
else:
failed = False
if failed:
self.fail(
"invalid logic in Or, fails on longest match with exception in parse action"
)
# from issue #93
word = pp.Word(pp.alphas).set_name("word")
word_1 = (
pp.Word(pp.alphas).set_name("word_1").add_condition(lambda t: len(t[0]) == 1)
)
a = word + (word_1 + word ^ word)
b = word * 3
c = a ^ b
c.streamline()
print(c)
test_string = "foo bar temp"
result = c.parse_string(test_string, parse_all=True)
print(test_string, "->", result.as_list())
self.assertEqual(
test_string.split(), result.as_list(), "failed to match longest choice"
)
def testEachWithOptionalWithResultsName(self):
result = (pp.Optional("foo")("one") & pp.Optional("bar")("two")).parse_string(
"bar foo", parse_all=True
)
print(result.dump())
self.assertEqual(sorted(["one", "two"]), sorted(result.keys()))
def testUnicodeExpression(self):
z = "a" | pp.Literal("\u1111")
z.streamline()
try:
z.parse_string("b", parse_all=True)
except ParseException as pe:
self.assertEqual(
r"""Expected {'a' | 'ᄑ'}""",
pe.msg,
f"Invalid error message raised, got {pe.msg!r}",
)
def testSetName(self):
a = pp.one_of("a b c")
b = pp.one_of("d e f")
# fmt: off
arith_expr = pp.infix_notation(
pp.Word(pp.nums),
[
(pp.one_of("* /").set_name("* | /"), 2, pp.OpAssoc.LEFT),
(pp.one_of("+ -").set_name("+ | -"), 2, pp.OpAssoc.LEFT),
],
)
arith_expr2 = pp.infix_notation(
pp.Word(pp.nums),
[
(("?", ":"), 3, pp.OpAssoc.LEFT),
]
)
# fmt: on
recursive = pp.Forward()
recursive <<= a + (b + recursive)[...]
tests = [
a,
b,
(a | b),
arith_expr,
arith_expr.expr,
arith_expr2,
arith_expr2.expr,
recursive,
pp.DelimitedList(pp.Word(pp.nums).set_name("int")),
pp.counted_array(pp.Word(pp.nums).set_name("int")),
pp.nested_expr(),
pp.make_html_tags("Z"),
(pp.any_open_tag, pp.any_close_tag),
pp.common_html_entity,
pp.common_html_entity.set_parse_action(pp.replace_html_entity).transform_string(
"lsdjkf <lsdjkf>&'"&xyzzy;"
),
]
expected = map(
str.strip,
"""\
'a' | 'b' | 'c'
'd' | 'e' | 'f'
{'a' | 'b' | 'c' | 'd' | 'e' | 'f'}
W:(0-9)_expression
+ | - operations
W:(0-9)_expression
?: operations
Forward: {'a' | 'b' | 'c' [{'d' | 'e' | 'f' : ...}]...}
int [, int]...
(len) int...
nested () expression
(<Z>, </Z>)
(<any tag>, </any tag>)
common HTML entity
lsdjkf <lsdjkf>&'"&xyzzy;""".splitlines(),
)
for t, e in zip(tests, expected):
with self.subTest("set_name", t=t, e=e):
tname = str(t)
print(tname)
self.assertEqual(
e,
tname,
f"expression name mismatch, expected {e} got {tname}",
)
def testTrimArityExceptionMasking(self):
invalid_message = "<lambda>() missing 1 required positional argument: 't'"
try:
pp.Word("a").set_parse_action(lambda t: t[0] + 1).parse_string(
"aaa", parse_all=True
)
except Exception as e:
exc_msg = str(e)
self.assertNotEqual(
exc_msg,
invalid_message,
"failed to catch TypeError thrown in _trim_arity",
)
def testTrimArityExceptionMaskingTest2(self):
# construct deep call tree
def A():
import traceback
traceback.print_stack(limit=2)
invalid_message = "<lambda>() missing 1 required positional argument: 't'"
try:
pp.Word("a").set_parse_action(lambda t: t[0] + 1).parse_string(
"aaa", parse_all=True
)
except Exception as e:
exc_msg = str(e)
self.assertNotEqual(
exc_msg,
invalid_message,
"failed to catch TypeError thrown in _trim_arity",
)
def B():
A()
def C():
B()
def D():
C()
def E():
D()
def F():
E()
def G():
F()
def H():
G()
def J():
H()
def K():
J()
K()
def testClearParseActions(self):
realnum = ppc.real()
self.assertEqual(
3.14159,
realnum.parse_string("3.14159", parse_all=True)[0],
"failed basic real number parsing",
)
# clear parse action that converts to float
realnum.set_parse_action(None)
self.assertEqual(
"3.14159",
realnum.parse_string("3.14159", parse_all=True)[0],
"failed clearing parse action",
)
        # add a new parse action that tests if a '.' is present
realnum.add_parse_action(lambda t: "." in t[0])
self.assertEqual(
True,
realnum.parse_string("3.14159", parse_all=True)[0],
"failed setting new parse action after clearing parse action",
)
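    # Quick reference for the pattern above (a sketch mirroring the test):
    # set_parse_action(None) removes all attached parse actions, after which
    # add_parse_action() starts a fresh chain:
    #   num = ppc.real()            # attaches a float-conversion action
    #   num.set_parse_action(None)  # now yields the raw matched string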
def testOneOrMoreStop(self):
test = "BEGIN aaa bbb ccc END"
BEGIN, END = map(pp.Keyword, "BEGIN,END".split(","))
body_word = pp.Word(pp.alphas).set_name("word")
for ender in (END, "END", pp.CaselessKeyword("END")):
expr = BEGIN + pp.OneOrMore(body_word, stop_on=ender) + END
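            # note: comparing a ParserElement to a str invokes a full-string match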
self.assertEqual(
expr, test, f"Did not successfully stop on ending expression {ender!r}"
)
expr = BEGIN + body_word[1, ...].stopOn(ender) + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
expr = BEGIN + body_word[1, ...:ender] + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
expr = BEGIN + body_word[(1, ...):ender] + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
number = pp.Word(pp.nums + ",.()").set_name("number with optional commas")
parser = pp.OneOrMore(pp.Word(pp.alphanums + "-/."), stop_on=number)(
"id"
).set_parse_action(" ".join) + number("data")
self.assertParseAndCheckList(
parser,
" XXX Y/123 1,234.567890",
["XXX Y/123", "1,234.567890"],
f"Did not successfully stop on ending expression {number!r}",
verbose=True,
)
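    # For reference, the spellings exercised above are equivalent ways to attach
    # a stop condition to a repetition (a sketch, not an exhaustive list):
    #   pp.OneOrMore(body_word, stop_on=ender)
    #   body_word[1, ...].stopOn(ender)
    #   body_word[1, ...:ender]
    #   body_word[(1, ...):ender]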
def testZeroOrMoreStop(self):
test = "BEGIN END"
BEGIN, END = map(pp.Keyword, "BEGIN,END".split(","))
body_word = pp.Word(pp.alphas).set_name("word")
for ender in (END, "END", pp.CaselessKeyword("END")):
expr = BEGIN + pp.ZeroOrMore(body_word, stop_on=ender) + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
expr = BEGIN + body_word[...].stopOn(ender) + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
expr = BEGIN + body_word[...:ender] + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
expr = BEGIN + body_word[:ender] + END
self.assertParseAndCheckList(
expr,
test,
test.split(),
f"Did not successfully stop on ending expression {ender!r}",
)
def testNestedAsDict(self):
equals = pp.Literal("=").suppress()
lbracket = pp.Literal("[").suppress()
rbracket = pp.Literal("]").suppress()
lbrace = pp.Literal("{").suppress()
rbrace = pp.Literal("}").suppress()
value_dict = pp.Forward()
value_list = pp.Forward()
value_string = pp.Word(pp.alphanums + "@. ")
value = value_list ^ value_dict ^ value_string
values = pp.Group(pp.DelimitedList(value, ","))
# ~ values = DelimitedList(value, ",").set_parse_action(lambda toks: [toks.as_list()])
value_list <<= lbracket + values + rbracket
identifier = pp.Word(pp.alphanums + "_.")
assignment = pp.Group(identifier + equals + pp.Optional(value))
assignments = pp.Dict(pp.DelimitedList(assignment, ";"))
value_dict <<= lbrace + assignments + rbrace
response = assignments
rsp = (
"username=goat; errors={username=[already taken, too short]}; empty_field="
)
result_dict = response.parse_string(rsp, parse_all=True).as_dict()
print(result_dict)
self.assertEqual(
"goat",
result_dict["username"],
"failed to process string in ParseResults correctly",
)
self.assertEqual(
["already taken", "too short"],
result_dict["errors"]["username"],
"failed to process nested ParseResults correctly",
)
def testTraceParseActionDecorator(self):
@pp.trace_parse_action
def convert_to_int(t):
return int(t[0])
class Z:
def __call__(self, other):
return other[0] * 1000
integer = pp.Word(pp.nums).add_parse_action(convert_to_int)
integer.add_parse_action(pp.trace_parse_action(lambda t: t[0] * 10))
integer.add_parse_action(pp.trace_parse_action(Z()))
integer.parse_string("132", parse_all=True)
def testTraceParseActionDecorator_with_exception(self):
@pp.trace_parse_action
def convert_to_int_raising_type_error(t):
return int(t[0]) + ".000"
@pp.trace_parse_action
def convert_to_int_raising_index_error(t):
return int(t[1])
@pp.trace_parse_action
def convert_to_int_raising_value_error(t):
a, b = t[0]
return int(t[1])
@pp.trace_parse_action
def convert_to_int_raising_parse_exception(t):
pp.Word(pp.alphas).parse_string("123")
for pa, expected_message in (
(convert_to_int_raising_type_error, "TypeError:"),
(convert_to_int_raising_index_error, "IndexError:"),
(convert_to_int_raising_value_error, "ValueError:"),
(convert_to_int_raising_parse_exception, "ParseException:"),
):
print(f"Using parse action {pa.__name__!r}")
integer = pp.Word(pp.nums).set_parse_action(pa)
stderr_capture = StringIO()
try:
with contextlib.redirect_stderr(stderr_capture):
integer.parse_string("132", parse_all=True)
except Exception as exc:
print(f"Exception raised: {type(exc).__name__}: {exc}")
else:
print("No exception raised")
stderr_text = stderr_capture.getvalue()
print(stderr_text)
self.assertTrue(
expected_message in stderr_text,
f"Expected exception type {expected_message!r} not found in trace_parse_action output",
)
def testRunTests(self):
integer = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
intrange = integer("start") + "-" + integer("end")
intrange.add_condition(
lambda t: t.end > t.start,
message="invalid range, start must be <= end",
fatal=True,
)
intrange.add_parse_action(lambda t: list(range(t.start, t.end + 1)))
indices = pp.DelimitedList(intrange | integer)
indices.add_parse_action(lambda t: sorted(set(t)))
tests = """\
# normal data
1-3,2-4,6,8-10,16
# lone integer
11"""
results = indices.run_tests(tests, print_results=False)[1]
expectedResults = [[1, 2, 3, 4, 6, 8, 9, 10, 16], [11]]
for res, expected in zip(results, expectedResults):
print(res[1].as_list())
print(expected)
self.assertEqual(expected, res[1].as_list(), "failed test: " + str(expected))
tests = """\
# invalid range
1-2, 3-1, 4-6, 7, 12
"""
success, _ = indices.run_tests(tests, print_results=False, failure_tests=True)
self.assertTrue(success, "failed to raise exception on improper range test")
def testRunTestsPostParse(self):
integer = ppc.integer
fraction = integer("numerator") + "/" + integer("denominator")
accum = []
def eval_fraction(test, result):
accum.append((test, result.as_list()))
return f"eval: {result.numerator / result.denominator}"
success, _ = fraction.run_tests(
"""\
1/2
1/0
""",
post_parse=eval_fraction,
)
self.assertTrue(success, "failed to parse fractions in RunTestsPostParse")
expected_accum = [("1/2", [1, "/", 2]), ("1/0", [1, "/", 0])]
self.assertEqual(
expected_accum, accum, "failed to call post_parse method during run_tests"
)
def testConvertToDateErr(self):
"""raise a ParseException in convert_to_date with incompatible date str"""
expr = pp.Word(pp.alphanums + "-")
expr.add_parse_action(ppc.convert_to_date())
with self.assertRaisesParseException():
expr.parse_string("1997-07-error", parse_all=True)
def testConvertToDatetimeErr(self):
"""raise a ParseException in convert_to_datetime with incompatible datetime str"""
expr = pp.Word(pp.alphanums + "-")
expr.add_parse_action(ppc.convert_to_datetime())
with self.assertRaisesParseException():
expr.parse_string("1997-07-error", parse_all=True)
def testCommonExpressions(self):
import ast
with self.subTest("MAC address success run_tests"):
success, _ = ppc.mac_address.run_tests(
"""
AA:BB:CC:DD:EE:FF
AA.BB.CC.DD.EE.FF
AA-BB-CC-DD-EE-FF
"""
)
self.assertTrue(success, "error in parsing valid MAC address")
with self.subTest("MAC address expected failure run_tests"):
success, _ = ppc.mac_address.run_tests(
"""
# mixed delimiters
AA.BB:CC:DD:EE:FF
""",
failure_tests=True,
)
self.assertTrue(success, "error in detecting invalid mac address")
with self.subTest("IPv4 address success run_tests"):
success, _ = ppc.ipv4_address.run_tests(
"""
0.0.0.0
1.1.1.1
127.0.0.1
1.10.100.199
255.255.255.255
"""
)
self.assertTrue(success, "error in parsing valid IPv4 address")
with self.subTest("IPv4 address expected failure run_tests"):
success, _ = ppc.ipv4_address.run_tests(
"""
# out of range value
256.255.255.255
""",
failure_tests=True,
)
self.assertTrue(success, "error in detecting invalid IPv4 address")
with self.subTest("IPv6 address success run_tests"):
success, _ = ppc.ipv6_address.run_tests(
"""
2001:0db8:85a3:0000:0000:8a2e:0370:7334
2134::1234:4567:2468:1236:2444:2106
0:0:0:0:0:0:A00:1
1080::8:800:200C:417A
::A00:1
# loopback address
::1
# the null address
::
# ipv4 compatibility form
::ffff:192.168.0.1
"""
)
self.assertTrue(success, "error in parsing valid IPv6 address")
with self.subTest("IPv6 address expected failure run_tests"):
success, _ = ppc.ipv6_address.run_tests(
"""
# too few values
1080:0:0:0:8:800:200C
# too many ::'s, only 1 allowed
2134::1234:4567::2444:2106
""",
failure_tests=True,
)
self.assertTrue(success, "error in detecting invalid IPv6 address")
with self.subTest("ppc.number success run_tests"):
success, _ = ppc.number.run_tests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
self.assertTrue(success, "error in parsing valid numerics")
with self.subTest("ppc.sci_real success run_tests"):
success, _ = ppc.sci_real.run_tests(
"""
1e12
-1e12
3.14159
6.02e23
"""
)
self.assertTrue(success, "error in parsing valid scientific notation reals")
# any int or real number, returned as float
with self.subTest("ppc.fnumber success run_tests"):
success, _ = ppc.fnumber.run_tests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
self.assertTrue(success, "error in parsing valid numerics")
with self.subTest("ppc.ieee_float success run_tests"):
success, _ = ppc.ieee_float.run_tests(
"""
100
3.14159
6.02e23
1E-12
0
-0
NaN
-nan
inf
-Infinity
"""
)
self.assertTrue(success, "error in parsing valid floating-point literals")
with self.subTest("ppc.iso8601_date success run_tests"):
success, results = ppc.iso8601_date.run_tests(
"""
1997
1997-07
1997-07-16
"""
)
self.assertTrue(success, "error in parsing valid iso8601_date")
expected = [
("1997", None, None),
("1997", "07", None),
("1997", "07", "16"),
]
for r, exp in zip(results, expected):
self.assertEqual(
exp,
(r[1].year, r[1].month, r[1].day),
"failed to parse date into fields",
)
with self.subTest("ppc.iso8601_date conversion success run_tests"):
success, results = (
ppc.iso8601_date()
.add_parse_action(ppc.convert_to_date())
.run_tests(
"""
1997-07-16
"""
)
)
self.assertTrue(
success, "error in parsing valid iso8601_date with parse action"
)
self.assertEqual(
datetime.date(1997, 7, 16),
results[0][1][0],
"error in parsing valid iso8601_date with parse action - incorrect value",
)
with self.subTest("ppc.iso8601_datetime success run_tests"):
success, results = ppc.iso8601_datetime.run_tests(
"""
1997-07-16T19:20+01:00
1997-07-16T19:20:30+01:00
1997-07-16T19:20:30.45Z
1997-07-16 19:20:30.45
"""
)
self.assertTrue(success, "error in parsing valid iso8601_datetime")
with self.subTest("ppc.iso8601_datetime conversion success run_tests"):
success, results = (
ppc.iso8601_datetime()
.add_parse_action(ppc.convert_to_datetime())
.run_tests(
"""
1997-07-16T19:20:30.45
"""
)
)
self.assertTrue(success, "error in parsing valid iso8601_datetime")
self.assertEqual(
datetime.datetime(1997, 7, 16, 19, 20, 30, 450000),
results[0][1][0],
"error in parsing valid iso8601_datetime - incorrect value",
)
with self.subTest("ppc.uuid success run_tests"):
success, _ = ppc.uuid.run_tests(
"""
123e4567-e89b-12d3-a456-426655440000
"""
)
self.assertTrue(success, "failed to parse valid uuid")
with self.subTest("ppc.fraction success run_tests"):
success, _ = ppc.fraction.run_tests(
"""
1/2
-15/16
-3/-4
"""
)
self.assertTrue(success, "failed to parse valid fraction")
with self.subTest("ppc.mixed_integer success run_tests"):
success, _ = ppc.mixed_integer.run_tests(
"""
1/2
-15/16
-3/-4
1 1/2
2 -15/16
0 -3/-4
12
"""
)
self.assertTrue(success, "failed to parse valid mixed integer")
with self.subTest("ppc.number success run_tests"):
success, results = ppc.number.run_tests(
"""
100
-3
1.732
-3.14159
6.02e23"""
)
self.assertTrue(success, "failed to parse numerics")
for test, result in results:
expected = ast.literal_eval(test)
self.assertEqual(
expected,
result[0],
f"numeric parse failed (wrong value) ({result[0]} should be {expected})",
)
self.assertEqual(
type(expected),
type(result[0]),
f"numeric parse failed (wrong type) ({type(result[0])} should be {type(expected)})",
)
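    # As the type checks above confirm, ppc.number returns native Python types
    # (a usage sketch):
    #   ppc.number.parse_string("100")[0]      # -> 100 (int)
    #   ppc.number.parse_string("3.14159")[0]  # -> 3.14159 (float)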
def testCommonUrl(self):
url_good_tests = """\
http://foo.com/blah_blah
http://foo.com/blah_blah/
http://foo.com/blah_blah_(wikipedia)
http://foo.com/blah_blah_(wikipedia)_(again)
http://www.example.com/wpstyle/?p=364
https://www.example.com/foo/?bar=baz&inga=42&quux
http://✪df.ws/123
http://userid:password@example.com:8080
http://userid:password@example.com:8080/
http://userid@example.com
http://userid@example.com/
http://userid@example.com:8080
http://userid@example.com:8080/
http://userid:password@example.com
http://userid:password@example.com/
http://142.42.1.1/
http://142.42.1.1:8080/
http://➡.ws/䨹
http://⌘.ws
http://⌘.ws/
http://foo.com/blah_(wikipedia)#cite-1
http://foo.com/blah_(wikipedia)_blah#cite-1
http://foo.com/unicode_(✪)_in_parens
http://foo.com/(something)?after=parens
http://☺.damowmow.com/
http://code.google.com/events/#&product=browser
http://j.mp
ftp://foo.bar/baz
http://foo.bar/?q=Test%20URL-encoded%20stuff
http://مثال.إختبار
"""
success, report = ppc.url.run_tests(url_good_tests)
self.assertTrue(success)
url_bad_tests = """\
http://
http://.
http://..
http://../
http://?
http://??
http://??/
http://#
http://##
http://##/
# skip: http://foo.bar?q=Spaces should be encoded
//
//a
///a
///
http:///a
foo.com
rdar://1234
h://test
http:// shouldfail.com
:// should fail
http://foo.bar/foo(bar)baz quux
ftps://foo.bar/
http://-error-.invalid/
# skip: http://a.b--c.de/
http://-a.b.co
http://a.b-.co
http://0.0.0.0
http://10.1.1.0
http://10.1.1.255
http://224.1.1.1
http://1.1.1.1.1
http://123.123.123
http://3628126748
http://.www.foo.bar/
# skip: http://www.foo.bar./
http://.www.foo.bar./
http://10.1.1.1
"""
success, report = ppc.url.run_tests(url_bad_tests, failure_tests=True)
self.assertTrue(success)
def testCommonUrlParts(self):
from urllib.parse import urlparse
sample_url = "https://bob:secret@www.example.com:8080/path/to/resource?filter=int#book-mark"
parts = urlparse(sample_url)
expected = {
"scheme": parts.scheme,
"auth": f"{parts.username}:{parts.password}",
"host": parts.hostname,
"port": str(parts.port),
"path": parts.path,
"query": parts.query,
"fragment": parts.fragment,
"url": sample_url,
}
self.assertParseAndCheckDict(ppc.url, sample_url, expected, verbose=True)
def testCommonUrlExprs(self):
def extract_parts(s, split=" "):
return [[_.strip(split)] for _ in s.strip(split).split(split)]
test_string = "http://example.com https://blah.org "
self.assertParseAndCheckList(
pp.Group(ppc.url)[...], test_string, extract_parts(test_string)
)
test_string = test_string.replace(" ", " , ")
self.assertParseAndCheckList(
pp.DelimitedList(pp.Group(ppc.url), allow_trailing_delim=True),
test_string,
extract_parts(test_string, " , "),
)
def testNumericExpressions(self):
# disable parse actions that do type conversion so we don't accidentally trigger
# conversion exceptions when what we want to check is the parsing expression
real = ppc.real().set_parse_action(None)
sci_real = ppc.sci_real().set_parse_action(None)
signed_integer = ppc.signed_integer().set_parse_action(None)
from itertools import product
def make_tests():
leading_sign = ["+", "-", ""]
leading_digit = ["0", ""]
dot = [".", ""]
decimal_digit = ["1", ""]
e = ["e", "E", ""]
e_sign = ["+", "-", ""]
e_int = ["22", ""]
stray = ["9", ".", ""]
seen = set()
seen.add("")
for parts in product(
leading_sign,
stray,
leading_digit,
dot,
decimal_digit,
stray,
e,
e_sign,
e_int,
stray,
):
parts_str = "".join(parts).strip()
if parts_str in seen:
continue
seen.add(parts_str)
yield parts_str
print(len(seen) - 1, "tests produced")
# collect tests into valid/invalid sets, depending on whether they evaluate to valid Python floats or ints
valid_ints = set()
valid_reals = set()
valid_sci_reals = set()
invalid_ints = set()
invalid_reals = set()
invalid_sci_reals = set()
# check which strings parse as valid floats or ints, and store in related valid or invalid test sets
for test_str in make_tests():
if "." in test_str or "e" in test_str.lower():
try:
float(test_str)
except ValueError:
invalid_sci_reals.add(test_str)
if "e" not in test_str.lower():
invalid_reals.add(test_str)
else:
valid_sci_reals.add(test_str)
if "e" not in test_str.lower():
valid_reals.add(test_str)
try:
int(test_str)
except ValueError:
invalid_ints.add(test_str)
else:
valid_ints.add(test_str)
# now try all the test sets against their respective expressions
all_pass = True
suppress_results = {"print_results": False}
for expr, tests, is_fail, fn in zip(
[real, sci_real, signed_integer] * 2,
[
valid_reals,
valid_sci_reals,
valid_ints,
invalid_reals,
invalid_sci_reals,
invalid_ints,
],
[False, False, False, True, True, True],
[float, float, int] * 2,
):
#
# success, test_results = expr.run_tests(sorted(tests, key=len), failure_tests=is_fail, **suppress_results)
# filter_result_fn = (lambda r: isinstance(r, Exception),
# lambda r: not isinstance(r, Exception))[is_fail]
# print(expr, ('FAIL', 'PASS')[success], "{}valid tests ({})".format(len(tests),
# 'in' if is_fail else ''))
# if not success:
# all_pass = False
# for test_string, result in test_results:
# if filter_result_fn(result):
# try:
# test_value = fn(test_string)
# except ValueError as ve:
# test_value = str(ve)
# print("{!r}: {} {} {}".format(test_string, result,
# expr.matches(test_string, parse_all=True), test_value))
success = True
for t in tests:
if expr.matches(t, parse_all=True):
if is_fail:
print(t, "should fail but did not")
success = False
else:
if not is_fail:
print(t, "should not fail but did")
success = False
print(
expr,
("FAIL", "PASS")[success],
f"{'in' if is_fail else ''}valid tests ({len(tests)})",
)
all_pass = all_pass and success
self.assertTrue(all_pass, "failed one or more numeric tests")
def testTokenMap(self):
parser = pp.OneOrMore(pp.Word(pp.hexnums)).set_parse_action(pp.token_map(int, 16))
success, report = parser.run_tests(
"""
00 11 22 aa FF 0a 0d 1a
"""
)
self.assertRunTestResults(
(success, report),
[([0, 17, 34, 170, 255, 10, 13, 26], "token_map parse action failed")],
msg="failed to parse hex integers",
)
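    # token_map(fn, *args) builds a parse action that applies fn(tok, *args) to
    # each matched token; roughly equivalent to this sketch:
    #   lambda t: [int(tok, 16) for tok in t]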
def testParseFile(self):
s = """
123 456 789
"""
from pathlib import Path
integer = ppc.integer
test_parser = integer[1, ...]
input_file_as_stringio = StringIO(s)
input_file_as_str = "tests/parsefiletest_input_file.txt"
input_file_as_path = Path(input_file_as_str)
expected_list = [int(i) for i in s.split()]
for input_file in (
input_file_as_stringio,
input_file_as_str,
input_file_as_path,
):
with self.subTest(input_file=input_file):
print(f"parse_file() called with {type(input_file).__name__}")
results = test_parser.parse_file(input_file)
print(results)
self.assertEqual(expected_list, results.as_list())
def testHTMLStripper(self):
sample = """
<html>
Here is some sample <i>HTML</i> text.
</html>
"""
read_everything = pp.original_text_for(pp.OneOrMore(pp.Word(pp.printables)))
read_everything.add_parse_action(ppc.strip_html_tags)
result = read_everything.parse_string(sample, parse_all=True)
self.assertEqual("Here is some sample HTML text.", result[0].strip())
def testExprSplitter(self):
expr = pp.Literal(";") + pp.Empty()
expr.ignore(pp.quoted_string)
expr.ignore(pp.python_style_comment)
sample = """
def main():
this_semi_does_nothing();
neither_does_this_but_there_are_spaces_afterward();
a = "a;b"; return a # this is a comment; it has a semicolon!
def b():
if False:
z=1000;b("; in quotes"); c=200;return z
return ';'
class Foo(object):
def bar(self):
'''a docstring; with a semicolon'''
a = 10; b = 11; c = 12
# this comment; has several; semicolons
if self.spam:
x = 12; return x # so; does; this; one
x = 15;;; y += x; return y
def baz(self):
return self.bar
"""
expected = [
[" this_semi_does_nothing()", ""],
[" neither_does_this_but_there_are_spaces_afterward()", ""],
[
' a = "a;b"',
"return a # this is a comment; it has a semicolon!",
],
[" z=1000", 'b("; in quotes")', "c=200", "return z"],
[" return ';'"],
[" '''a docstring; with a semicolon'''"],
[" a = 10", "b = 11", "c = 12"],
[" # this comment; has several; semicolons"],
[" x = 12", "return x # so; does; this; one"],
[" x = 15", "", "", "y += x", "return y"],
]
exp_iter = iter(expected)
for line in filter(lambda ll: ";" in ll, sample.splitlines()):
print(str(list(expr.split(line))) + ",")
self.assertEqual(
next(exp_iter), list(expr.split(line)), "invalid split on expression"
)
print()
expected = [
[" this_semi_does_nothing()", ";", ""],
[" neither_does_this_but_there_are_spaces_afterward()", ";", ""],
[
' a = "a;b"',
";",
"return a # this is a comment; it has a semicolon!",
],
[
" z=1000",
";",
'b("; in quotes")',
";",
"c=200",
";",
"return z",
],
[" return ';'"],
[" '''a docstring; with a semicolon'''"],
[" a = 10", ";", "b = 11", ";", "c = 12"],
[" # this comment; has several; semicolons"],
[" x = 12", ";", "return x # so; does; this; one"],
[
" x = 15",
";",
"",
";",
"",
";",
"y += x",
";",
"return y",
],
]
exp_iter = iter(expected)
for line in filter(lambda ll: ";" in ll, sample.splitlines()):
print(str(list(expr.split(line, include_separators=True))) + ",")
self.assertEqual(
next(exp_iter),
list(expr.split(line, include_separators=True)),
"invalid split on expression",
)
print()
expected = [
[" this_semi_does_nothing()", ""],
[" neither_does_this_but_there_are_spaces_afterward()", ""],
[
' a = "a;b"',
"return a # this is a comment; it has a semicolon!",
],
[" z=1000", 'b("; in quotes"); c=200;return z'],
[" a = 10", "b = 11; c = 12"],
[" x = 12", "return x # so; does; this; one"],
[" x = 15", ";; y += x; return y"],
]
exp_iter = iter(expected)
for line in sample.splitlines():
pieces = list(expr.split(line, maxsplit=1))
print(str(pieces) + ",")
if len(pieces) == 2:
exp = next(exp_iter)
self.assertEqual(
exp, pieces, "invalid split on expression with maxSplits=1"
)
elif len(pieces) == 1:
self.assertEqual(
0,
len(expr.search_string(line)),
"invalid split with maxSplits=1 when expr not present",
)
else:
print("\n>>> " + line)
self.fail("invalid split on expression with maxSplits=1, corner case")
def testParseFatalException(self):
with self.assertRaisesParseException(
exc_type=ParseFatalException, msg="failed to raise ErrorStop exception"
):
expr = "ZZZ" - pp.Word(pp.nums)
expr.parse_string("ZZZ bad", parse_all=True)
def testParseFatalException2(self):
        # Fatal exception raised in MatchFirst should not be superseded by later non-fatal exceptions
# addresses Issue #251
def raise_exception(tokens):
raise pp.ParseSyntaxException("should raise here")
test = pp.MatchFirst(
(
pp.pyparsing_common.integer + pp.pyparsing_common.identifier
).set_parse_action(raise_exception)
| pp.pyparsing_common.number
)
with self.assertRaisesParseException(pp.ParseFatalException):
test.parse_string("1s", parse_all=True)
def testParseFatalException3(self):
        # Fatal exception raised in MatchFirst should not be superseded by later non-fatal exceptions
# addresses Issue #251
test = pp.MatchFirst(
(pp.pyparsing_common.integer - pp.pyparsing_common.identifier)
| pp.pyparsing_common.integer
)
with self.assertRaisesParseException(pp.ParseFatalException):
test.parse_string("1", parse_all=True)
def testInlineLiteralsUsing(self):
wd = pp.Word(pp.alphas)
pp.ParserElement.inline_literals_using(pp.Suppress)
result = (wd + "," + wd + pp.one_of("! . ?")).parse_string(
"Hello, World!", parse_all=True
)
self.assertEqual(3, len(result), "inline_literals_using(Suppress) failed!")
pp.ParserElement.inline_literals_using(pp.Literal)
result = (wd + "," + wd + pp.one_of("! . ?")).parse_string(
"Hello, World!", parse_all=True
)
self.assertEqual(4, len(result), "inline_literals_using(Literal) failed!")
pp.ParserElement.inline_literals_using(pp.CaselessKeyword)
self.assertParseAndCheckList(
"SELECT" + wd + "FROM" + wd,
"select color from colors",
expected_list=["SELECT", "color", "FROM", "colors"],
msg="inline_literals_using(CaselessKeyword) failed!",
verbose=True,
)
pp.ParserElement.inline_literals_using(pp.CaselessLiteral)
self.assertParseAndCheckList(
"SELECT" + wd + "FROM" + wd,
"select color from colors",
expected_list=["SELECT", "color", "FROM", "colors"],
msg="inline_literals_using(CaselessLiteral) failed!",
verbose=True,
)
integer = pp.Word(pp.nums)
pp.ParserElement.inline_literals_using(pp.Literal)
date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
self.assertParseAndCheckList(
date_str,
"1999/12/31",
expected_list=["1999", "/", "12", "/", "31"],
msg="inline_literals_using(example 1) failed!",
verbose=True,
)
# change to Suppress
pp.ParserElement.inline_literals_using(pp.Suppress)
date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
self.assertParseAndCheckList(
date_str,
"1999/12/31",
expected_list=["1999", "12", "31"],
msg="inline_literals_using(example 2) failed!",
verbose=True,
)
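    # Note: inline_literals_using() changes, process-wide, the class used to
    # wrap plain-string operands in expressions built afterwards; expressions
    # built earlier keep whatever wrapper class was active at the time.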
def testCloseMatch(self):
searchseq = pp.CloseMatch("ATCATCGAATGGA", 2)
_, results = searchseq.run_tests(
"""
ATCATCGAATGGA
XTCATCGAATGGX
ATCATCGAAXGGA
ATCAXXGAATGGA
ATCAXXGAATGXA
ATCAXXGAATGG
"""
)
expected = ([], [0, 12], [9], [4, 5], None, None)
for r, exp in zip(results, expected):
if exp is not None:
self.assertEqual(
exp,
r[1].mismatches,
f"fail CloseMatch between {searchseq.match_string!r} and {r[0]!r}",
)
print(
r[0],
(
f"exc: {r[1]}"
if exp is None and isinstance(r[1], Exception)
else ("no match", "match")[r[1].mismatches == exp]
),
)
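    # CloseMatch results expose a 'mismatches' list of differing indices (an
    # empty list means an exact match). A minimal sketch:
    #   pp.CloseMatch("ATCG", 1).parse_string("ATCX").mismatches  # -> [3]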
def testCloseMatchCaseless(self):
searchseq = pp.CloseMatch("ATCATCGAATGGA", 2, caseless=True)
_, results = searchseq.run_tests(
"""
atcatcgaatgga
xtcatcgaatggx
atcatcgaaxgga
atcaxxgaatgga
atcaxxgaatgxa
atcaxxgaatgg
"""
)
expected = ([], [0, 12], [9], [4, 5], None, None)
for r, exp in zip(results, expected):
if exp is not None:
self.assertEqual(
exp,
r[1].mismatches,
f"fail CaselessCloseMatch between {searchseq.match_string!r} and {r[0]!r}",
)
print(
r[0],
(
f"exc: {r[1]}"
if exp is None and isinstance(r[1], Exception)
else ("no match", "match")[r[1].mismatches == exp]
),
)
def testDefaultKeywordChars(self):
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
pp.Keyword("start").parse_string("start1000", parse_all=True)
try:
pp.Keyword("start", ident_chars=pp.alphas).parse_string(
"start1000", parse_all=False
)
except pp.ParseException:
self.fail("failed to match keyword using updated keyword chars")
with ppt.reset_pyparsing_context():
pp.Keyword.set_default_keyword_chars(pp.alphas)
try:
pp.Keyword("start").parse_string("start1000", parse_all=False)
except pp.ParseException:
self.fail("failed to match keyword using updated keyword chars")
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
pp.CaselessKeyword("START").parse_string("start1000", parse_all=False)
try:
pp.CaselessKeyword("START", ident_chars=pp.alphas).parse_string(
"start1000", parse_all=False
)
except pp.ParseException:
self.fail("failed to match keyword using updated keyword chars")
with ppt.reset_pyparsing_context():
pp.Keyword.set_default_keyword_chars(pp.alphas)
try:
pp.CaselessKeyword("START").parse_string("start1000", parse_all=False)
except pp.ParseException:
                self.fail("failed to match keyword using updated keyword chars")
def testKeywordCopyIdentChars(self):
a_keyword = pp.Keyword("start", ident_chars="_")
b_keyword = a_keyword.copy()
self.assertEqual(a_keyword.ident_chars, b_keyword.ident_chars)
def testCopyLiteralAttrs(self):
lit = pp.Literal("foo").leave_whitespace()
lit2 = lit.copy()
self.assertFalse(lit2.skipWhitespace)
lit3 = lit2.ignore_whitespace().copy()
self.assertTrue(lit3.skipWhitespace)
def testWordParseImpl(self):
ppc = pp.common
ppu = pp.unicode
ident_exprs = [
ppc.identifier,
pp.Word(pp.alphas),
pp.Literal("identifier"),
pp.Regex(r"[a-zA-Z_][a-zA-Z0-9_]*"),
pp.Regex(r"[a-zA-Z_][a-zA-Z0-9_]*", as_match=True),
pp.Regex(r"[a-zA-Z_][a-zA-Z0-9_]*", as_group_list=True),
ppu.BMP.identifier,
pp.Char(pp.alphas),
pp.Keyword("identifier"),
]
for ident in ident_exprs:
with self.subTest(msg=f"Using expression type {type(ident).__name__}"):
                custom_id = (
                    ident()
                    .set_name("standard identifier")
                    .copy()
                    .set_name("custom identifier")
                )
with self.assertRaisesParseException(expected_msg="Expected custom identifier"):
custom_id.parse_string("1", parse_all=True)
with self.subTest(msg="Using integer"):
            custom_int = (
                ppc.integer()
                .set_name("standard integer")
                .copy()
                .set_name("custom integer")
            )
with self.assertRaisesParseException(expected_msg="Expected custom integer"):
custom_int.parse_string("z", parse_all=True)
def testWordCopyWhenWordCharsIncludeSpace(self):
# Issue #618 - with copy()
word_with_space = pp.Word(pp.alphas + " ")
word_with_space.parse_string("ABC")
# no longer raises exception
word_with_space.copy().parse_string("ABC")
def testWordCopyWhenWordCharsIncludeSpace2(self):
# Issue #618 - with DelimitedList()
element = pp.QuotedString('"') | pp.Combine(pp.Word(' abcdefghijklmnopqrstuvwxyz'))
element.parse_string("abc")
element_list = pp.DelimitedList(element)
# no longer raises exception
element_list.parse_string("abc")
def testWordCopyWhenWordCharsIncludeSpace3(self):
# Issue #618 - with results name
word_with_space = pp.Word(pp.alphas + " ")
word_with_space.parse_string("ABC")
# no longer raises exception
word_with_space("trouble").parse_string("ABC")
def testLiteralVsKeyword(self):
integer = ppc.integer
literal_expr = integer + pp.Literal("start") + integer
keyword_expr = integer + pp.Keyword("start") + integer
caseless_keyword_expr = integer + pp.CaselessKeyword("START") + integer
word_keyword_expr = (
integer + pp.Word(pp.alphas, as_keyword=True).set_name("word") + integer
)
print()
test_string = "1 start 2"
print(test_string)
print(literal_expr, literal_expr.parse_string(test_string, parse_all=True))
print(keyword_expr, keyword_expr.parse_string(test_string, parse_all=True))
print(
caseless_keyword_expr,
caseless_keyword_expr.parse_string(test_string, parse_all=True),
)
print(
word_keyword_expr, word_keyword_expr.parse_string(test_string, parse_all=True)
)
print()
test_string = "3 start4"
print(test_string)
print(literal_expr, literal_expr.parse_string(test_string, parse_all=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(keyword_expr.parse_string(test_string, parse_all=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(caseless_keyword_expr.parse_string(test_string, parse_all=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(word_keyword_expr.parse_string(test_string, parse_all=True))
print()
test_string = "5start 6"
print(test_string)
print(literal_expr.parse_string(test_string, parse_all=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(keyword_expr.parse_string(test_string, parse_all=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(caseless_keyword_expr.parse_string(test_string, parse_all=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(word_keyword_expr.parse_string(test_string, parse_all=True))
def testCol(self):
test = "*\n* \n* ALF\n*\n"
initials = [c for i, c in enumerate(test) if pp.col(i, test) == 1]
print(initials)
self.assertTrue(
len(initials) == 4 and all(c == "*" for c in initials), "fail col test"
)
def testLiteralException(self):
for cls in (
pp.Literal,
pp.CaselessLiteral,
pp.Keyword,
pp.CaselessKeyword,
pp.Word,
pp.Regex,
):
expr = cls("xyz") # .set_name('{}_expr'.format(cls.__name__.lower()))
try:
expr.parse_string(" ", parse_all=True)
except Exception as e:
print(cls.__name__, str(e))
self.assertTrue(
isinstance(e, pp.ParseBaseException),
f"class {cls.__name__} raised wrong exception type {type(e).__name__}",
)
def testParseActionIndexErrorException(self):
"""
Tests raising an IndexError in a parse action
"""
import traceback
number = pp.Word(pp.nums)
def number_action():
raise IndexError # this is the important line!
number.add_parse_action(number_action)
symbol = pp.Word("abcd", max=1)
expr = pp.Group(number) ^ symbol
try:
expr.parse_string("1 + 2", parse_all=True)
except IndexError as ie:
pass
except Exception as e:
traceback.print_exc()
self.fail(f"Expected IndexError not raised, raised {type(e).__name__}: {e}")
else:
self.fail("Expected IndexError not raised")
# tests Issue #22
def testParseActionNesting(self):
vals = pp.OneOrMore(ppc.integer)("int_values")
def add_total(tokens):
tokens["total"] = sum(tokens)
return tokens
vals.add_parse_action(add_total)
results = vals.parse_string("244 23 13 2343", parse_all=True)
print(results.dump())
self.assertParseResultsEquals(
results,
expected_dict={"int_values": [244, 23, 13, 2343], "total": 2623},
msg="noop parse action changed ParseResults structure",
)
name = pp.Word(pp.alphas)("name")
score = pp.Word(pp.nums + ".")("score")
nameScore = pp.Group(name + score)
line1 = nameScore("Rider")
result1 = line1.parse_string("Mauney 46.5", parse_all=True)
print("### before parse action is added ###")
print("result1.dump():\n" + result1.dump() + "\n")
before_pa_dict = result1.as_dict()
line1.set_parse_action(lambda t: t)
result1 = line1.parse_string("Mauney 46.5", parse_all=True)
after_pa_dict = result1.as_dict()
print("### after parse action was added ###")
print("result1.dump():\n" + result1.dump() + "\n")
self.assertEqual(
before_pa_dict,
after_pa_dict,
"noop parse action changed ParseResults structure",
)
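    # Takeaway from the test above: a parse action may mutate the incoming
    # ParseResults in place (as add_total does) or return the same object;
    # neither should alter the structure that dump()/as_dict() report.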
def testParseActionWithDelimitedList(self):
class AnnotatedToken:
def __init__(self, kind, elements):
self.kind = kind
self.elements = elements
def __str__(self):
return f"AnnotatedToken({self.kind!r}, {self.elements!r})"
def __eq__(self, other):
return (
type(self) == type(other)
and self.kind == other.kind
and self.elements == other.elements
)
__repr__ = __str__
def annotate(name):
def _(t):
return AnnotatedToken(name, t.as_list())
return _
identifier = pp.Word(pp.srange("[a-z0-9]"))
numeral = pp.Word(pp.nums)
named_number_value = pp.Suppress("(") + numeral + pp.Suppress(")")
named_number = identifier + named_number_value
named_number_list = (
pp.Suppress("{")
+ pp.Group(pp.Optional(pp.DelimitedList(named_number)))
+ pp.Suppress("}")
)
        # repro of bug in #345 - DelimitedList silently changes contents of named_number
named_number_value.set_parse_action(annotate("val"))
test_string = "{ x1(1), x2(2) }"
expected = [
["x1", AnnotatedToken("val", ["1"]), "x2", AnnotatedToken("val", ["2"])]
]
self.assertParseAndCheckList(named_number_list, test_string, expected)
def testParseActionRunsInNotAny(self):
# see Issue #482
data = """ [gog1] [G1] [gog2] [gog3] [gog4] [G2] [gog5] [G3] [gog6] """
poi_type = pp.Word(pp.alphas).set_results_name("type")
poi = pp.Suppress("[") + poi_type + pp.Char(pp.nums) + pp.Suppress("]")
def cnd_is_type(val):
return lambda toks: toks.type == val
poi_gog = poi("gog").add_condition(cnd_is_type("gog"))
poi_g = poi("g").add_condition(cnd_is_type("G"))
pattern = poi_gog + ~poi_g
matches = pattern.search_string(data).as_list()
self.assertEqual(
[["gog", "2"], ["gog", "3"], ["gog", "6"]],
matches,
"failed testing parse actions being run inside a NotAny",
)
def testParseResultsNameBelowUngroupedName(self):
rule_num = pp.Regex("[0-9]+")("LIT_NUM*")
list_num = pp.Group(
pp.Literal("[")("START_LIST")
+ pp.DelimitedList(rule_num)("LIST_VALUES")
+ pp.Literal("]")("END_LIST")
)("LIST")
test_string = "[ 1,2,3,4,5,6 ]"
success, _ = list_num.run_tests(test_string)
self.assertTrue(success)
U = list_num.parse_string(test_string, parse_all=True)
self.assertTrue(
"LIT_NUM" not in U.LIST.LIST_VALUES,
"results name retained as sub in ungrouped named result",
)
def testParseResultsNamesInGroupWithDict(self):
key = ppc.identifier()
value = ppc.integer()
lat = ppc.real()
long = ppc.real()
EQ = pp.Suppress("=")
data = (
lat("lat")
+ long("long")
+ pp.Dict(pp.OneOrMore(pp.Group(key + EQ + value)))
)
site = pp.QuotedString('"')("name") + pp.Group(data)("data")
test_string = '"Golden Gate Bridge" 37.819722 -122.478611 height=746 span=4200'
success, _ = site.run_tests(test_string)
self.assertTrue(success)
a, aEnd = pp.make_html_tags("a")
attrs = a.parse_string("<a href='blah'>", parse_all=True)
print(attrs.dump())
self.assertParseResultsEquals(
attrs,
expected_dict={
"startA": {"href": "blah", "tag": "a", "empty": False},
"href": "blah",
"tag": "a",
"empty": False,
},
)
def testMakeXMLTags(self):
"""test helper function make_xml_tags in simple use case"""
body, bodyEnd = pp.make_xml_tags("body")
tst = "<body>Hello</body>"
expr = body + pp.Word(pp.alphas)("contents") + bodyEnd
result = expr.parse_string(tst, parse_all=True)
print(result.dump())
self.assertParseResultsEquals(
result, ["body", False, "Hello", "</body>"], msg="issue using make_xml_tags"
)
def testFollowedBy(self):
expr = pp.Word(pp.alphas)("item") + pp.FollowedBy(ppc.integer("qty"))
result = expr.parse_string("balloon 99", parse_all=False)
print(result.dump())
self.assertTrue("qty" in result, "failed to capture results name in FollowedBy")
self.assertEqual(
{"item": "balloon", "qty": 99},
result.as_dict(),
"invalid results name structure from FollowedBy",
)
def testSetBreak(self):
"""
Test behavior of ParserElement.set_break(), to invoke the debugger before parsing that element is attempted.
Temporarily monkeypatches sys.breakpointhook().
"""
was_called = False
def mock_set_trace(*args, **kwargs):
nonlocal was_called
was_called = True
wd = pp.Word(pp.alphas)
wd.set_break()
print("Before parsing with set_break:", was_called)
with ppt.reset_pyparsing_context():
sys.breakpointhook = mock_set_trace
wd.parse_string("ABC", parse_all=True)
print("After parsing with set_break:", was_called)
sys.breakpointhook = sys.__breakpointhook__
self.assertTrue(was_called, "set_trace wasn't called by set_break")
def testUnicodeTests(self):
import unicodedata
ppu = pp.pyparsing_unicode
unicode_version = unicodedata.unidata_version
print(f"Unicode version {unicode_version}")
# verify ranges are converted to sets properly
for unicode_property, expected_length in [
("alphas", 48965),
("alphanums", 49430),
("identchars", 49013),
("identbodychars", 50729),
("printables", 65484),
]:
charset = getattr(ppu.BMP, unicode_property)
charset_len = len(charset)
# this subtest is sensitive to the Unicode version used in the current
# python version
if unicode_version == "14.0.0":
with self.subTest(unicode_property=unicode_property, msg="verify len"):
print(f"ppu.BMP.{unicode_property:14}: {charset_len:6d}")
self.assertEqual(
charset_len,
expected_length,
f"incorrect number of ppu.BMP.{unicode_property},"
f" found {charset_len} expected {expected_length}",
)
with self.subTest(unicode_property=unicode_property, msg="verify unique"):
char_counts = collections.Counter(charset)
self.assertTrue(
all(count == 1 for count in char_counts.values()),
f"duplicate items found in ppu.BMP.{unicode_property}:"
f" {[(ord(c), c) for c, count in char_counts.items() if count > 1]}",
)
# verify proper merging of ranges by addition
kanji_printables = ppu.Japanese.Kanji.printables
katakana_printables = ppu.Japanese.Katakana.printables
hiragana_printables = ppu.Japanese.Hiragana.printables
japanese_printables = ppu.Japanese.printables
with self.subTest(msg="verify constructing ranges by merging types"):
self.assertEqual(
set(kanji_printables + katakana_printables + hiragana_printables),
set(japanese_printables),
"failed to construct ranges by merging Japanese types",
)
# verify proper merging of ranges using multiple inheritance
cjk_printables = ppu.CJK.printables
chinese_printables = ppu.Chinese.printables
korean_printables = ppu.Korean.printables
with self.subTest(
msg="verify merging ranges by using multiple inheritance generates unique list of characters"
):
char_counts = collections.Counter(cjk_printables)
self.assertTrue(
all(count == 1 for count in char_counts.values()),
"duplicate items found in ppu.CJK.printables:"
f" {[(ord(c), c) for c, count in char_counts.items() if count > 1]}",
)
with self.subTest(
msg="verify merging ranges by using multiple inheritance generates sorted list of characters"
):
self.assertEqual(
list(cjk_printables),
sorted(cjk_printables),
"CJK printables are not sorted",
)
with self.subTest(
msg="verify summing chars is equivalent to merging ranges by using multiple inheritance (CJK)"
):
print(
len(set(chinese_printables + korean_printables + japanese_printables)),
len(cjk_printables),
)
self.assertEqual(
set(chinese_printables + korean_printables + japanese_printables),
set(cjk_printables),
"failed to construct ranges by merging Chinese, Japanese and Korean",
)
def testUnicodeTests2(self):
ppu = pp.unicode
alphas = ppu.Greek.alphas
greet = pp.Word(alphas) + "," + pp.Word(alphas) + "!"
# input string
hello = "Καλημέρα, κόσμε!"
result = greet.parse_string(hello, parse_all=True)
print(result)
self.assertParseResultsEquals(
result,
expected_list=["Καλημέρα", ",", "κόσμε", "!"],
msg="Failed to parse Greek 'Hello, World!' using "
"pyparsing_unicode.Greek.alphas",
)
# define a custom unicode range using multiple inheritance
class Turkish_set(ppu.Latin1, ppu.LatinA):
pass
for attrname in "printables alphas nums identchars identbodychars".split():
with self.subTest(
"verify unicode_set composed using MI", attrname=attrname
):
latin1_value = getattr(ppu.Latin1, attrname)
latinA_value = getattr(ppu.LatinA, attrname)
turkish_value = getattr(Turkish_set, attrname)
self.assertEqual(
set(latin1_value + latinA_value),
set(turkish_value),
f"failed to construct ranges by merging Latin1 and LatinA ({attrname})",
)
with self.subTest("Test using new Turkish_set for parsing"):
key = pp.Word(Turkish_set.alphas)
value = ppc.integer | pp.Word(Turkish_set.alphas, Turkish_set.alphanums)
EQ = pp.Suppress("=")
key_value = key + EQ + value
sample = """\
şehir=İzmir
ülke=Türkiye
nüfus=4279677"""
result = pp.Dict(pp.OneOrMore(pp.Group(key_value))).parse_string(
sample, parse_all=True
)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_dict={"şehir": "İzmir", "ülke": "Türkiye", "nüfus": 4279677},
msg="Failed to parse Turkish key-value pairs",
)
# Basic Multilingual Plane only contains chars up to 65535
def filter_16_bit(s):
return "".join(c for c in s if ord(c) < 2**16)
with self.subTest():
bmp_printables = ppu.BMP.printables
sample = (
"".join(
random.choice(filter_16_bit(unicode_set.printables))
for unicode_set in (
ppu.Japanese,
Turkish_set,
ppu.Greek,
ppu.Hebrew,
ppu.Devanagari,
ppu.Hangul,
ppu.Latin1,
ppu.Chinese,
ppu.Cyrillic,
ppu.Arabic,
ppu.Thai,
)
for _ in range(8)
)
+ "\N{REPLACEMENT CHARACTER}"
)
print(sample)
self.assertParseAndCheckList(pp.Word(bmp_printables), sample, [sample])
def testUnicodeSetNameEquivalence(self):
ppu = pp.unicode
for ascii_name, unicode_name in [
("Arabic", "العربية"),
("Chinese", "中文"),
("Cyrillic", "кириллица"),
("Greek", "Ελληνικά"),
("Hebrew", "עִברִית"),
("Japanese", "日本語"),
("Korean", "한국어"),
("Thai", "ไทย"),
("Devanagari", "देवनागरी"),
]:
with self.subTest(ascii_name=ascii_name, unicode_name=unicode_name):
self.assertTrue(
eval(f"ppu.{ascii_name} is ppu.{unicode_name}", {}, locals())
)
def testIndentedBlock(self):
# parse pseudo-yaml indented text
EQ = pp.Suppress("=")
stack = [1]
key = ppc.identifier
value = pp.Forward()
key_value = pp.Group(key + EQ + value)
compound_value = pp.Dict(pp.ungroup(pp.IndentedBlock(key_value, grouped=True)))
value <<= ppc.integer | pp.QuotedString("'") | compound_value
parser = pp.Dict(pp.OneOrMore(key_value))
text = """
a = 100
b = 101
c =
c1 = 200
c2 =
c21 = 999
c3 = 'A horse, a horse, my kingdom for a horse'
d = 505
"""
text = dedent(text)
print(text)
result = parser.parse_string(text, parse_all=True)
print(result.dump())
self.assertEqual(100, result.a, "invalid indented block result")
self.assertEqual(200, result.c.c1, "invalid indented block result")
self.assertEqual(999, result.c.c2.c21, "invalid indented block result")
# exercise indented_block with example posted in issue #87
def testIndentedBlockTest2(self):
indent_stack = [1]
key = pp.Word(pp.alphas, pp.alphanums) + pp.Suppress(":")
stmt = pp.Forward()
suite = pp.IndentedBlock(stmt, grouped=True)
body = key + suite
pattern = (
pp.Word(pp.alphas)
+ pp.Suppress("(")
+ pp.Word(pp.alphas)
+ pp.Suppress(")")
)
stmt <<= pattern
def key_parse_action(toks):
print(f"Parsing '{toks[0]}'...")
key.set_parse_action(key_parse_action)
header = pp.Suppress("[") + pp.Literal("test") + pp.Suppress("]")
content = header - pp.OneOrMore(pp.IndentedBlock(body))
contents = pp.Forward()
suites = pp.IndentedBlock(content)
extra = pp.Literal("extra") + pp.Suppress(":") - suites
contents <<= content | extra
parser = pp.OneOrMore(contents)
sample = dedent(
"""
extra:
[test]
one0:
two (three)
four0:
five (seven)
extra:
[test]
one1:
two (three)
four1:
five (seven)
"""
)
success, _ = parser.run_tests([sample])
self.assertTrue(success, "Failed IndentedBlock test for issue #87")
sample2 = dedent(
"""
extra:
[test]
one:
two (three)
four:
five (seven)
extra:
[test]
one:
two (three)
four:
five (seven)
[test]
one:
two (three)
four:
five (seven)
[test]
eight:
nine (ten)
eleven:
twelve (thirteen)
fourteen:
fifteen (sixteen)
seventeen:
eighteen (nineteen)
"""
)
del indent_stack[1:]
success, _ = parser.run_tests([sample2])
self.assertTrue(success, "Failed indented_block multi-block test for issue #87")
def testIndentedBlockScan(self):
def get_parser():
"""
A valid statement is the word "block:", followed by an indent, followed by the letter A only, or another block
"""
stack = [1]
block = pp.Forward()
body = pp.IndentedBlock(
pp.Literal("A") ^ block
)
block <<= pp.Literal("block:") + body
return block
# This input string is a perfect match for the parser, so a single match is found
p1 = get_parser()
r1 = list(
p1.scan_string(
dedent(
"""\
block:
A
"""
)
)
)
self.assertEqual(1, len(r1))
# This input string is a perfect match for the parser, except for the letter B instead of A, so this will fail (and should)
p2 = get_parser()
r2 = list(
p2.scan_string(
dedent(
"""\
block:
B
"""
)
)
)
self.assertEqual(0, len(r2))
# This input string contains both string A and string B, and it finds one match (as it should)
p3 = get_parser()
r3 = list(
p3.scan_string(
dedent(
"""\
block:
A
block:
B
"""
)
)
)
self.assertEqual(1, len(r3))
# This input string contains both string A and string B, but in a different order.
p4 = get_parser()
r4 = list(
p4.scan_string(
dedent(
"""\
block:
B
block:
A
"""
)
)
)
self.assertEqual(1, len(r4))
# This is the same as case 3, but with nesting
p5 = get_parser()
r5 = list(
p5.scan_string(
dedent(
"""\
block:
block:
A
block:
block:
B
"""
)
)
)
self.assertEqual(1, len(r5))
# This is the same as case 4, but with nesting
p6 = get_parser()
r6 = list(
p6.scan_string(
dedent(
"""\
block:
block:
B
block:
block:
A
"""
)
)
)
self.assertEqual(1, len(r6))
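    # scan_string() yields (tokens, start, end) for each non-overlapping match
    # found anywhere in the input, so len(list(...)) above counts whole-block
    # matches rather than requiring the entire string to parse.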
def testIndentedBlockClass(self):
data = """\
A
100
101
102
B
200
201
C
300
"""
integer = ppc.integer
group = pp.Group(pp.Char(pp.alphas) + pp.IndentedBlock(integer))
group[...].parse_string(data, parse_all=True).pprint()
self.assertParseAndCheckList(
group[...], data, [["A", [100, 101, 102]], ["B", [200, 201]], ["C", [300]]]
)
def testIndentedBlockClass2(self):
datas = [
"""\
A
100
B
200
201
""",
"""\
A
100
B
200
201
""",
"""\
A
100
B
200
201
""",
]
integer = ppc.integer
group = pp.Group(
pp.Char(pp.alphas) + pp.IndentedBlock(integer, recursive=False)
)
for data in datas:
print()
print(ppt.with_line_numbers(data))
print(group[...].parse_string(data).as_list())
self.assertParseAndCheckList(
group[...] + integer.suppress(),
data,
[["A", [100]], ["B", [200]]],
verbose=False,
)
def testIndentedBlockClassWithRecursion(self):
data = """\
A
100
101
102
B
b
200
201
C
300
"""
integer = ppc.integer
group = pp.Forward()
group <<= pp.Group(pp.Char(pp.alphas) + pp.IndentedBlock(integer | group))
print("using search_string")
print(group.search_string(data))
# print(sum(group.search_string(data)).dump())
self.assertParseAndCheckList(
group[...],
data,
[["A", [100, 101, 102]], ["B", [["b", [200, 201]]]], ["C", [300]]],
)
print("using parse_string")
print(group[...].parse_string(data, parse_all=True).dump())
dotted_int = pp.DelimitedList(
pp.Word(pp.nums), ".", allow_trailing_delim=True, combine=True
)
indented_expr = pp.IndentedBlock(dotted_int, recursive=True, grouped=True)
# indented_expr = pp.Forward()
        # indented_expr <<= pp.IndentedBlock(dotted_int + indented_expr)
good_data = """\
1.
1.1
1.1.1
1.1.2
2."""
bad_data1 = """\
1.
1.1
1.1.1
1.2
2."""
bad_data2 = """\
1.
1.1
1.1.1
1.2
2."""
print("test good indentation")
print(pp.pyparsing_test.with_line_numbers(good_data))
print(indented_expr.parse_string(good_data, parse_all=True).as_list())
print()
print("test bad indentation")
print(pp.pyparsing_test.with_line_numbers(bad_data1))
with self.assertRaisesParseException(
msg="Failed to raise exception with bad indentation 1"
):
indented_expr.parse_string(bad_data1, parse_all=True)
print(pp.pyparsing_test.with_line_numbers(bad_data2))
with self.assertRaisesParseException(
msg="Failed to raise exception with bad indentation 2"
):
indented_expr.parse_string(bad_data2, parse_all=True)
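    # With recursive=True, IndentedBlock also matches deeper-indented sub-blocks
    # as nested groups, which is why the inconsistent dedents in bad_data1 and
    # bad_data2 above fail to parse.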
def testInvalidDiagSetting(self):
with self.assertRaises(
ValueError,
msg="failed to raise exception when setting non-existent __diag__",
):
pp.__diag__.enable("xyzzy")
with self.assertWarns(
UserWarning, msg="failed to warn disabling 'collect_all_And_tokens"
):
pp.__compat__.disable("collect_all_And_tokens")
def testParseResultsWithNameMatchFirst(self):
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
expr = (expr_a | expr_b)("rexp")
success, report = expr.run_tests(
"""\
not the bird
the bird
"""
)
results = [rpt[1] for rpt in report]
self.assertParseResultsEquals(
results[0], ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
self.assertParseResultsEquals(
results[1], ["the", "bird"], {"rexp": ["the", "bird"]}
)
# test compatibility mode, no longer restoring pre-2.3.1 behavior
with ppt.reset_pyparsing_context():
pp.__compat__.collect_all_And_tokens = False
pp.enable_diag(pp.Diagnostics.warn_multiple_tokens_in_named_alternation)
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
with self.assertWarns(
UserWarning, msg="failed to warn of And within alternation"
):
expr = (expr_a | expr_b)("rexp")
with self.assertDoesNotWarn(
UserWarning,
msg="warned when And within alternation warning was suppressed",
):
expr = (expr_a | expr_b).suppress_warning(
pp.Diagnostics.warn_multiple_tokens_in_named_alternation
)("rexp")
success, report = expr.run_tests(
"""
not the bird
the bird
"""
)
results = [rpt[1] for rpt in report]
self.assertParseResultsEquals(
results[0], ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
self.assertParseResultsEquals(
results[1], ["the", "bird"], {"rexp": ["the", "bird"]}
)
def testParseResultsWithNameOr(self):
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
expr = (expr_a ^ expr_b)("rexp")
success, _ = expr.run_tests(
"""\
not the bird
the bird
"""
)
self.assertTrue(success)
result = expr.parse_string("not the bird", parse_all=True)
self.assertParseResultsEquals(
result, ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
result = expr.parse_string("the bird", parse_all=True)
self.assertParseResultsEquals(
result, ["the", "bird"], {"rexp": ["the", "bird"]}
)
expr = (expr_a | expr_b)("rexp")
success, _ = expr.run_tests(
"""\
not the bird
the bird
"""
)
self.assertTrue(success)
result = expr.parse_string("not the bird", parse_all=True)
self.assertParseResultsEquals(
result, ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
result = expr.parse_string("the bird", parse_all=True)
self.assertParseResultsEquals(
result, ["the", "bird"], {"rexp": ["the", "bird"]}
)
# test compatibility mode, no longer restoring pre-2.3.1 behavior
with ppt.reset_pyparsing_context():
pp.__compat__.collect_all_And_tokens = False
pp.enable_diag(pp.Diagnostics.warn_multiple_tokens_in_named_alternation)
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
with self.assertWarns(
UserWarning, msg="failed to warn of And within alternation"
):
expr = (expr_a ^ expr_b)("rexp")
with self.assertDoesNotWarn(
UserWarning,
msg="warned when And within alternation warning was suppressed",
):
expr = (expr_a ^ expr_b).suppress_warning(
pp.Diagnostics.warn_multiple_tokens_in_named_alternation
)("rexp")
success, _ = expr.run_tests(
"""\
not the bird
the bird
"""
)
self.assertTrue(success)
self.assertEqual(
"not the bird".split(),
list(expr.parse_string("not the bird", parse_all=True)["rexp"]),
)
self.assertEqual(
"the bird".split(),
list(expr.parse_string("the bird", parse_all=True)["rexp"]),
)
def testEmptyDictDoesNotRaiseException(self):
key = pp.Word(pp.alphas)
value = pp.Word(pp.nums)
EQ = pp.Suppress("=")
key_value_dict = pp.dict_of(key, EQ + value)
print(
key_value_dict.parse_string(
"""\
a = 10
b = 20
""",
parse_all=True,
).dump()
)
try:
print(key_value_dict.parse_string("", parse_all=True).dump())
except pp.ParseException as pe:
print(pp.ParseException.explain(pe))
else:
self.fail("failed to raise exception when matching empty string")
def testExplainException(self):
expr = pp.Word(pp.nums).set_name("int") + pp.Word(pp.alphas).set_name("word")
try:
expr.parse_string("123 355", parse_all=True)
except pp.ParseException as pe:
print(pe.explain(depth=0))
expr = pp.Word(pp.nums).set_name("int") - pp.Word(pp.alphas).set_name("word")
try:
expr.parse_string("123 355 (test using ErrorStop)", parse_all=True)
except pp.ParseSyntaxException as pe:
print(pe.explain())
integer = pp.Word(pp.nums).set_name("int").add_parse_action(lambda t: int(t[0]))
expr = integer + integer
def divide_args(t):
integer.parse_string("A", parse_all=True)
return t[0] / t[1]
expr.add_parse_action(divide_args)
try:
expr.parse_string("123 0", parse_all=True)
except pp.ParseException as pe:
print(pe.explain())
except Exception as exc:
print(pp.ParseBaseException.explain_exception(exc))
raise
def testExplainExceptionWithMemoizationCheck(self):
if pp.ParserElement._left_recursion_enabled or pp.ParserElement._packratEnabled:
print("test does local memoization enable/disable during test")
return
pp.ParserElement.disable_memoization()
integer = pp.Word(pp.nums).set_name("int").add_parse_action(lambda t: int(t[0]))
expr = integer + integer
def divide_args(t):
integer.parse_string("A", parse_all=True)
return t[0] / t[1]
expr.add_parse_action(divide_args)
for memo_kind, enable_memo in [
("Packrat", pp.ParserElement.enable_packrat),
("Left Recursion", pp.ParserElement.enable_left_recursion),
]:
enable_memo(force=True)
print("Explain for", memo_kind)
try:
expr.parse_string("123 0", parse_all=True)
except pp.ParseException as pe:
print(pe.explain())
except Exception as exc:
print(pp.ParseBaseException.explain_exception(exc))
raise
# make sure we leave the state compatible with everything
pp.ParserElement.disable_memoization()
def testCaselessKeywordVsKeywordCaseless(self):
frule = pp.Keyword("t", caseless=True) + pp.Keyword("yes", caseless=True)
crule = pp.CaselessKeyword("t") + pp.CaselessKeyword("yes")
flist = frule.search_string("not yes").as_list()
print(flist)
clist = crule.search_string("not yes").as_list()
print(clist)
self.assertEqual(
flist,
clist,
"CaselessKeyword not working the same as Keyword(caseless=True)",
)
def testOneOf(self):
expr = pp.one_of("a b abb")
assert expr.pattern == "abb|a|b"
expr = pp.one_of("a abb b abb")
assert expr.pattern == "abb|a|b"
expr = pp.one_of("a abb abbb b abb")
assert expr.pattern == "abbb|abb|a|b"
expr = pp.one_of("a abbb abb b abb")
assert expr.pattern == "abbb|abb|a|b"
# make sure regex-unsafe characters are properly escaped
expr = pp.one_of("a+ b* c? () +a *b ?c")
assert expr.pattern == r"a\+|b\*|c\?|\(\)|\+a|\*b|\?c"
def testOneOfKeywords(self):
literal_expr = pp.one_of("a b c")
success, _ = literal_expr[...].run_tests(
"""
# literal one_of tests
a b c
a a a
abc
"""
)
self.assertTrue(success, "failed literal one_of matching")
keyword_expr = pp.one_of("a b c", as_keyword=True)
success, _ = keyword_expr[...].run_tests(
"""
# keyword one_of tests
a b c
a a a
"""
)
self.assertTrue(success, "failed keyword one_of matching")
success, _ = keyword_expr[...].run_tests(
"""
# keyword one_of failure tests
abc
""",
failure_tests=True,
)
self.assertTrue(success, "failed keyword one_of failure tests")
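    # Editor's sketch (hypothetical method, not in the original suite):
    # as_keyword=True adds word-boundary checks, which is why "abc" matches a
    # leading "a" only in the plain literal form.
    def testOneOfKeywordBoundarySketch(self):
        literal_expr = pp.one_of("a b")
        self.assertEqual(
            ["a"], literal_expr.parse_string("abc", parse_all=False).as_list()
        )
        keyword_expr = pp.one_of("a b", as_keyword=True)
        with self.assertRaises(pp.ParseException):
            keyword_expr.parse_string("abc", parse_all=False)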
def testWarnUngroupedNamedTokens(self):
"""
- warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names (default=True)
"""
with self.assertDoesNotWarn(
msg=f"raised {pp.Diagnostics.warn_ungrouped_named_tokens_in_collection} warning when not enabled"
):
COMMA = pp.Suppress(",").set_name("comma")
coord = ppc.integer("x") + COMMA + ppc.integer("y")
path = coord[...].set_results_name("path")
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.warn_ungrouped_named_tokens_in_collection)
COMMA = pp.Suppress(",").set_name("comma")
coord = ppc.integer("x") + COMMA + ppc.integer("y")
# this should emit a warning
with self.assertWarns(
UserWarning,
msg="failed to warn with named repetition of"
" ungrouped named expressions",
):
path = coord[...].set_results_name("path")
with self.assertDoesNotWarn(
UserWarning,
msg="warned when named repetition of"
" ungrouped named expressions warning was suppressed",
):
path = (
coord[...]
.suppress_warning(
pp.Diagnostics.warn_ungrouped_named_tokens_in_collection
)
.set_results_name("path")
)
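    # Editor's sketch (hypothetical method, not in the original suite): wrapping
    # the named subexpressions in a Group is the usual fix for this warning -
    # each repetition then carries its own "x"/"y" names instead of overwriting
    # them.
    def testGroupedNamedTokensSketch(self):
        COMMA = pp.Suppress(",")
        coord = pp.Group(ppc.integer("x") + COMMA + ppc.integer("y"))
        path = coord[...]("path")
        result = path.parse_string("1,2 3,4", parse_all=True)
        self.assertEqual(2, len(result.path))
        self.assertEqual(3, result.path[1].x)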
def testDontWarnUngroupedNamedTokensIfWarningSuppressed(self):
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.warn_ungrouped_named_tokens_in_collection)
with self.assertDoesNotWarn(
msg=f"raised {pp.Diagnostics.warn_ungrouped_named_tokens_in_collection}"
f" warning when warn on ungrouped named tokens was suppressed (original_text_for)"
):
pp.original_text_for(pp.Word("ABC")[...])("words")
def testWarnNameSetOnEmptyForward(self):
"""
- warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined
with a results name, but has no contents defined (default=False)
"""
with self.assertDoesNotWarn(
msg=f"raised {pp.Diagnostics.warn_name_set_on_empty_Forward} warning when not enabled"
):
base = pp.Forward()("z")
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.warn_name_set_on_empty_Forward)
base = pp.Forward()
with self.assertWarns(
UserWarning,
msg="failed to warn when naming an empty Forward expression",
):
base("x")
with self.assertDoesNotWarn(
UserWarning,
msg="warned when naming an empty Forward expression warning was suppressed",
):
base.suppress_warning(pp.Diagnostics.warn_name_set_on_empty_Forward)(
"x"
)
def testWarnParsingEmptyForward(self):
"""
- warn_on_parse_using_empty_Forward - flag to enable warnings when a Forward
has no contents defined (default=False)
"""
with self.assertDoesNotWarn(
msg=f"raised {pp.Diagnostics.warn_on_parse_using_empty_Forward} warning when not enabled"
):
base = pp.Forward()
try:
print(base.parse_string("x", parse_all=True))
except ParseException as pe:
pass
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.warn_on_parse_using_empty_Forward)
base = pp.Forward()
with self.assertWarns(
UserWarning,
msg="failed to warn when parsing using an empty Forward expression",
):
try:
print(base.parse_string("x", parse_all=True))
except ParseException as pe:
pass
with self.assertDoesNotWarn(
UserWarning,
msg="warned when parsing using an empty Forward expression warning was suppressed",
):
base.suppress_warning(pp.Diagnostics.warn_on_parse_using_empty_Forward)
try:
print(base.parse_string("x", parse_all=True))
except ParseException as pe:
pass
def testWarnIncorrectAssignmentToForward(self):
"""
        - warn_on_assignment_to_Forward - flag to enable warnings when a Forward
          is assigned using '=' instead of '<<=' or '<<' (default=False)
"""
if PYPY_ENV:
print("warn_on_assignment_to_Forward not supported on PyPy")
return
def a_method():
base = pp.Forward()
base = pp.Word(pp.alphas)[...] | "(" + base + ")"
with self.assertDoesNotWarn(
msg=f"raised {pp.Diagnostics.warn_on_assignment_to_Forward} warning when not enabled"
):
a_method()
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.warn_on_assignment_to_Forward)
with self.assertWarns(
UserWarning,
msg="failed to warn when using '=' to assign expression to a Forward",
):
a_method()
def a_method():
base = pp.Forward().suppress_warning(
pp.Diagnostics.warn_on_assignment_to_Forward
)
base = pp.Word(pp.alphas)[...] | "(" + base + ")"
with self.assertDoesNotWarn(
UserWarning,
msg="warned when using '=' to assign expression to a Forward warning was suppressed",
):
a_method()
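    # Editor's sketch (hypothetical method, not in the original suite): the
    # recommended spelling uses '<<=' so the recursive reference stays bound to
    # the same Forward instance.
    def testForwardInPlaceAssignmentSketch(self):
        parens = pp.Forward()
        parens <<= pp.Word(pp.alphas) | pp.Group("(" + parens + ")")
        result = parens.parse_string("((abc))", parse_all=True)
        self.assertEqual([["(", ["(", "abc", ")"], ")"]], result.as_list())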
def testWarnOnMultipleStringArgsToOneOf(self):
"""
- warn_on_multiple_string_args_to_oneof - flag to enable warnings when one_of is
incorrectly called with multiple str arguments (default=True)
"""
with self.assertDoesNotWarn(
msg=f"raised {pp.Diagnostics.warn_on_multiple_string_args_to_oneof} warning when not enabled"
):
a = pp.one_of("A", "B")
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.warn_on_multiple_string_args_to_oneof)
with self.assertWarns(
UserWarning,
msg="failed to warn when incorrectly calling one_of(string, string)",
):
a = pp.one_of("A", "B")
def testAutonameElements(self):
with ppt.reset_pyparsing_context():
pp.enable_diag(pp.Diagnostics.enable_debug_on_named_expressions)
a = pp.Literal("a")
b = pp.Literal("b").set_name("bbb")
z = pp.Literal("z")
leading_a = a + pp.FollowedBy(z | a | b)
grammar = (z | leading_a | b)[...] + "a"
self.assertFalse(a.debug)
self.assertFalse(a.customName)
pp.autoname_elements()
self.assertTrue(a.debug)
self.assertEqual("a", a.name)
self.assertEqual("bbb", b.name)
def testDelimitedListName(self):
bool_constant = pp.Literal("True") | "true" | "False" | "false"
bool_list = pp.DelimitedList(bool_constant)
print(bool_list)
self.assertEqual(
"{'True' | 'true' | 'False' | 'false'} [, {'True' | 'true' | 'False' | 'false'}]...",
str(bool_list),
)
bool_constant.set_name("bool")
print(bool_constant)
print(bool_constant.streamline())
bool_list2 = pp.DelimitedList(bool_constant)
print(bool_constant)
print(bool_constant.streamline())
print(bool_list2)
with self.subTest():
self.assertEqual("bool [, bool]...", str(bool_list2))
with self.subTest():
street_address = pp.common.integer.set_name("integer") + pp.Word(pp.alphas)[
1, ...
].set_name("street_name")
self.assertEqual(
"{integer street_name} [, {integer street_name}]...",
str(pp.DelimitedList(street_address)),
)
with self.subTest():
operand = pp.Char(pp.alphas).set_name("var")
math = pp.infix_notation(
operand,
[
(pp.one_of("+ -"), 2, pp.OpAssoc.LEFT),
],
)
self.assertEqual(
"var_expression [, var_expression]...",
str(pp.DelimitedList(math)),
)
def testDelimitedListOfStrLiterals(self):
expr = pp.DelimitedList("ABC")
print(str(expr))
source = "ABC, ABC,ABC"
self.assertParseAndCheckList(
expr, source, [s.strip() for s in source.split(",")]
)
def testDelimitedListMinMax(self):
source = "ABC, ABC,ABC"
with self.assertRaises(ValueError, msg="min must be greater than 0"):
pp.DelimitedList("ABC", min=0)
with self.assertRaises(
ValueError, msg="max must be greater than, or equal to min"
):
pp.DelimitedList("ABC", min=1, max=0)
with self.assertRaises(pp.ParseException):
pp.DelimitedList("ABC", min=4).parse_string(source)
source_expr_pairs = [
("ABC, ABC", pp.DelimitedList("ABC", max=2)),
(source, pp.DelimitedList("ABC", min=2, max=4)),
]
for source, expr in source_expr_pairs:
print(str(expr))
self.assertParseAndCheckList(
expr, source, [s.strip() for s in source.split(",")]
)
def testDelimitedListParseActions1(self):
# from issue #408
keyword = pp.Keyword("foobar")
untyped_identifier = ~keyword + pp.Word(pp.alphas)
dotted_vars = pp.DelimitedList(untyped_identifier, delim=".")
lvalue = pp.Opt(dotted_vars)
        # this active line exhibits the problem described in issue #408
stmt = pp.DelimitedList(pp.Opt(dotted_vars))
# stmt = DelimitedList(dotted_vars)
# stmt = pp.Opt(dotted_vars)
def parse_identifier(toks):
print("YAY!", toks)
untyped_identifier.set_parse_action(parse_identifier)
save_stdout = StringIO()
with contextlib.redirect_stdout(save_stdout):
dotted_vars.parse_string("B.C")
self.assertEqual(
dedent(
"""\
YAY! ['B']
YAY! ['C']
"""
),
save_stdout.getvalue(),
)
def testDelimitedListParseActions2(self):
# from issue #408
keyword = pp.Keyword("foobar")
untyped_identifier = ~keyword + pp.Word(pp.alphas)
dotted_vars = pp.DelimitedList(untyped_identifier, delim=".")
lvalue = pp.Opt(dotted_vars)
# uncomment this line to see the problem
# stmt = DelimitedList(Opt(dotted_vars))
stmt = pp.DelimitedList(dotted_vars)
# stmt = pp.Opt(dotted_vars)
def parse_identifier(toks):
print("YAY!", toks)
untyped_identifier.set_parse_action(parse_identifier)
save_stdout = StringIO()
with contextlib.redirect_stdout(save_stdout):
dotted_vars.parse_string("B.C")
self.assertEqual(
dedent(
"""\
YAY! ['B']
YAY! ['C']
"""
),
save_stdout.getvalue(),
)
def testDelimitedListParseActions3(self):
# from issue #408
keyword = pp.Keyword("foobar")
untyped_identifier = ~keyword + pp.Word(pp.alphas)
dotted_vars = pp.DelimitedList(untyped_identifier, delim=".")
lvalue = pp.Opt(dotted_vars)
# uncomment this line to see the problem
# stmt = DelimitedList(Opt(dotted_vars))
# stmt = DelimitedList(dotted_vars)
stmt = pp.Opt(dotted_vars)
def parse_identifier(toks):
print("YAY!", toks)
untyped_identifier.set_parse_action(parse_identifier)
save_stdout = StringIO()
with contextlib.redirect_stdout(save_stdout):
dotted_vars.parse_string("B.C")
self.assertEqual(
dedent(
"""\
YAY! ['B']
YAY! ['C']
"""
),
save_stdout.getvalue(),
)
def testTagElements(self):
end_punc = (
("." + pp.Tag("mood", "normal"))
| ("!" + pp.Tag("mood", "excited"))
| ("?" + pp.Tag("mood", "curious"))
)
greeting = "Hello" + pp.Word(pp.alphas) + end_punc[1, ...]
for ending, expected_mood in [
(".", "normal"),
("!", "excited"),
("?", "curious"),
("!!", "excited"),
("!?", "curious"),
]:
self.assertParseAndCheckDict(
greeting, f"Hello World{ending}", {"mood": expected_mood}
)
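    # Editor's sketch (hypothetical method, not in the original suite): Tag
    # consumes no input, so it can also supply a default value when none of the
    # punctuation alternatives match.
    def testTagDefaultValueSketch(self):
        expr = pp.Word(pp.alphas) + (
            "!" + pp.Tag("mood", "excited") | pp.Tag("mood", "normal")
        )
        self.assertEqual("excited", expr.parse_string("Hi!", parse_all=True)["mood"])
        self.assertEqual("normal", expr.parse_string("Hi", parse_all=True)["mood"])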
def testEnableDebugOnNamedExpressions(self):
"""
- enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
calls to ParserElement.set_name() (default=False)
"""
with ppt.reset_pyparsing_context():
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
pp.enable_diag(pp.Diagnostics.enable_debug_on_named_expressions)
integer = pp.Word(pp.nums).set_name("integer")
integer[...].parse_string("1 2 3", parse_all=True)
expected_debug_output = dedent(
"""\
Match integer at loc 0(1,1)
1 2 3
^
Matched integer -> ['1']
Match integer at loc 2(1,3)
1 2 3
^
Matched integer -> ['2']
Match integer at loc 4(1,5)
1 2 3
^
Matched integer -> ['3']
Match integer at loc 5(1,6)
1 2 3
^
Match integer failed, ParseException raised: Expected integer, found end of text (at char 5), (line:1, col:6)
"""
)
output = test_stdout.getvalue()
print(output)
self.assertEqual(
expected_debug_output,
output,
"failed to auto-enable debug on named expressions "
"using enable_debug_on_named_expressions",
)
def testEnableDebugOnExpressionWithParseAction(self):
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
parser = (ppc.integer().set_debug() | pp.Word(pp.alphanums).set_debug())[...]
parser.set_debug()
parser.parse_string("123 A100", parse_all=True)
# now turn off debug - should only get output for components, not overall parser
print()
parser.set_debug(False)
parser.parse_string("123 A100", parse_all=True)
expected_debug_output = dedent(
"""\
Match [{integer | W:(0-9A-Za-z)}]... at loc 0(1,1)
123 A100
^
Match integer at loc 0(1,1)
123 A100
^
Matched integer -> [123]
Match integer at loc 4(1,5)
123 A100
^
Match integer failed, ParseException raised: Expected integer, found 'A100' (at char 4), (line:1, col:5)
Match W:(0-9A-Za-z) at loc 4(1,5)
123 A100
^
Matched W:(0-9A-Za-z) -> ['A100']
Match integer at loc 8(1,9)
123 A100
^
Match integer failed, ParseException raised: Expected integer, found end of text (at char 8), (line:1, col:9)
Match W:(0-9A-Za-z) at loc 8(1,9)
123 A100
^
Match W:(0-9A-Za-z) failed, ParseException raised: Expected W:(0-9A-Za-z), found end of text (at char 8), (line:1, col:9)
Matched [{integer | W:(0-9A-Za-z)}]... -> [123, 'A100']
Match integer at loc 0(1,1)
123 A100
^
Matched integer -> [123]
Match integer at loc 4(1,5)
123 A100
^
Match integer failed, ParseException raised: Expected integer, found 'A100' (at char 4), (line:1, col:5)
Match W:(0-9A-Za-z) at loc 4(1,5)
123 A100
^
Matched W:(0-9A-Za-z) -> ['A100']
Match integer at loc 8(1,9)
123 A100
^
Match integer failed, ParseException raised: Expected integer, found end of text (at char 8), (line:1, col:9)
Match W:(0-9A-Za-z) at loc 8(1,9)
123 A100
^
Match W:(0-9A-Za-z) failed, ParseException raised: Expected W:(0-9A-Za-z), found end of text (at char 8), (line:1, col:9)
"""
)
output = test_stdout.getvalue()
print(output)
self.assertEqual(
expected_debug_output,
output,
"invalid debug output when using parse action",
)
def testEnableDebugWithCachedExpressionsMarkedWithAsterisk(self):
a = pp.Literal("a").set_name("A").set_debug()
b = pp.Literal("b").set_name("B").set_debug()
z = pp.Literal("z").set_name("Z").set_debug()
leading_a = a + pp.FollowedBy(z | a | b)
leading_a.set_name("leading_a").set_debug()
grammar = (z | leading_a | b)[...] + "a"
# parse test string and capture debug output
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
grammar.parse_string("aba", parse_all=True)
expected_debug_output = dedent(
"""\
Match Z at loc 0(1,1)
aba
^
Match Z failed, ParseException raised: Expected Z, found 'aba' (at char 0), (line:1, col:1)
Match leading_a at loc 0(1,1)
aba
^
Match A at loc 0(1,1)
aba
^
Matched A -> ['a']
Match Z at loc 1(1,2)
aba
^
Match Z failed, ParseException raised: Expected Z, found 'ba' (at char 1), (line:1, col:2)
Match A at loc 1(1,2)
aba
^
Match A failed, ParseException raised: Expected A, found 'ba' (at char 1), (line:1, col:2)
Match B at loc 1(1,2)
aba
^
Matched B -> ['b']
Matched leading_a -> ['a']
*Match Z at loc 1(1,2)
aba
^
*Match Z failed, ParseException raised: Expected Z, found 'ba' (at char 1), (line:1, col:2)
Match leading_a at loc 1(1,2)
aba
^
Match A at loc 1(1,2)
aba
^
Match A failed, ParseException raised: Expected A, found 'ba' (at char 1), (line:1, col:2)
Match leading_a failed, ParseException raised: Expected A, found 'ba' (at char 1), (line:1, col:2)
*Match B at loc 1(1,2)
aba
^
*Matched B -> ['b']
Match Z at loc 2(1,3)
aba
^
Match Z failed, ParseException raised: Expected Z, found 'a' (at char 2), (line:1, col:3)
Match leading_a at loc 2(1,3)
aba
^
Match A at loc 2(1,3)
aba
^
Matched A -> ['a']
Match Z at loc 3(1,4)
aba
^
Match Z failed, ParseException raised: Expected Z, found end of text (at char 3), (line:1, col:4)
Match A at loc 3(1,4)
aba
^
Match A failed, ParseException raised: Expected A, found end of text (at char 3), (line:1, col:4)
Match B at loc 3(1,4)
aba
^
Match B failed, ParseException raised: Expected B, found end of text (at char 3), (line:1, col:4)
Match leading_a failed, ParseException raised: Expected {Z | A | B}, found end of text (at char 3), (line:1, col:4)
Match B at loc 2(1,3)
aba
^
Match B failed, ParseException raised: Expected B, found 'a' (at char 2), (line:1, col:3)
"""
)
if pp.ParserElement._packratEnabled:
packrat_status = "enabled"
else:
# remove '*' cache markers from expected output
expected_debug_output = expected_debug_output.replace("*", "")
packrat_status = "disabled"
print("Packrat status:", packrat_status)
output = test_stdout.getvalue()
print(output)
self.assertEqual(
expected_debug_output,
output,
(
f"invalid debug output showing cached results marked with '*',"
f" and packrat parsing {packrat_status}"
),
)
def testSetDebugRecursively(self):
expr = pp.Word(pp.alphas)
contained = expr + pp.Empty().set_name("innermost")
depth = 4
for _ in range(depth):
contained = pp.Group(contained + pp.Empty())
contained.set_debug(recurse=True)
self.assertTrue(expr.debug)
# contained.parse_string("ABC")
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
contained.parse_string("aba", parse_all=True)
output = test_stdout.getvalue()
print(output)
self.assertEqual(depth, output.count("Matched Empty -> []"))
self.assertEqual(1, output.count("Matched innermost -> []"))
def testSetDebugRecursivelyWithForward(self):
expr = pp.Word(pp.alphas).set_name("innermost")
contained = pp.infix_notation(
expr,
[
("NOT", 1, pp.OpAssoc.RIGHT),
("AND", 2, pp.OpAssoc.LEFT),
("OR", 2, pp.OpAssoc.LEFT),
],
)
contained.set_debug(recurse=True)
self.assertTrue(expr.debug)
# contained.parse_string("ABC")
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
contained.parse_string("aba", parse_all=True)
output = test_stdout.getvalue()
print(output)
        # count of matches varies with packrat state, so we can't assert an exact
        # count; just verify that the output contains the match
# self.assertEqual(4, output.count("Matched innermost -> ['aba']"))
self.assertTrue("Matched innermost -> ['aba']" in output)
def testUndesirableButCommonPractices(self):
        # While these are valid constructs, they are not encouraged;
# there is apparently a lot of code out there using these
# coding styles.
#
# Even though they are not encouraged, we shouldn't break them.
# Create an And using a list of expressions instead of using '+' operator
expr = pp.And([pp.Word("abc"), pp.Word("123")])
success, _ = expr.run_tests(
"""
aaa 333
b 1
ababab 32123
"""
)
self.assertTrue(success)
success, _ = expr.run_tests("""\
aad 111
""", failure_tests=True
)
self.assertTrue(success)
# Passing a single expression to a ParseExpression, when it really wants a sequence
expr = pp.Or(pp.Or(ppc.integer))
success, _ = expr.run_tests("""\
123
456
"""
)
self.assertTrue(success)
success, _ = expr.run_tests("""\
abc
""", failure_tests=True
)
self.assertTrue(success)
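    # Editor's sketch (hypothetical method, not in the original suite):
    # pp.And([a, b]) builds the same expression as a + b, which is why the list
    # form above must keep working.
    def testAndListEquivalenceSketch(self):
        a, b = pp.Word(pp.alphas), pp.Word(pp.nums)
        self.assertEqual(
            pp.And([a, b]).parse_string("x 1", parse_all=True).as_list(),
            (a + b).parse_string("x 1", parse_all=True).as_list(),
        )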
def testEnableWarnDiags(self):
import pprint
def filtered_vars(var_dict):
dunders = [nm for nm in var_dict if nm.startswith("__")]
return {
k: v
for k, v in var_dict.items()
if isinstance(v, bool) and k not in dunders
}
pprint.pprint(filtered_vars(vars(pp.__diag__)), width=30)
warn_names = pp.__diag__._warning_names
other_names = pp.__diag__._debug_names
# make sure they are off by default
for diag_name in warn_names:
self.assertFalse(
getattr(pp.__diag__, diag_name),
f"__diag__.{diag_name} not set to True",
)
with ppt.reset_pyparsing_context():
# enable all warn_* diag_names
pp.enable_all_warnings()
pprint.pprint(filtered_vars(vars(pp.__diag__)), width=30)
# make sure they are on after being enabled
for diag_name in warn_names:
self.assertTrue(
getattr(pp.__diag__, diag_name),
f"__diag__.{diag_name} not set to True",
)
# non-warn diag_names must be enabled individually
for diag_name in other_names:
self.assertFalse(
getattr(pp.__diag__, diag_name),
f"__diag__.{diag_name} not set to True",
)
# make sure they are off after AutoReset
for diag_name in warn_names:
self.assertFalse(
getattr(pp.__diag__, diag_name),
f"__diag__.{diag_name} not set to True",
)
def testWordInternalReRangeWithConsecutiveChars(self):
self.assertParseAndCheckList(
pp.Word("ABCDEMNXYZ"),
"ABCDEMNXYZABCDEMNXYZABCDEMNXYZ",
["ABCDEMNXYZABCDEMNXYZABCDEMNXYZ"],
)
def testWordInternalReRangesKnownSet(self):
tests = [
("ABCDEMNXYZ", "[A-EMNX-Z]+"),
(pp.printables, "[!-~]+"),
(pp.alphas, "[A-Za-z]+"),
(pp.alphanums, "[0-9A-Za-z]+"),
(pp.pyparsing_unicode.Latin1.printables, "[!-~¡-ÿ]+"),
(pp.pyparsing_unicode.Latin1.alphas, "[A-Za-zªµºÀ-ÖØ-öø-ÿ]+"),
(pp.pyparsing_unicode.Latin1.alphanums, "[0-9A-Za-zª²³µ¹ºÀ-ÖØ-öø-ÿ]+"),
(pp.alphas8bit, "[À-ÖØ-öø-ÿ]+"),
]
failed = []
for word_string, expected_re in tests:
try:
msg = f"failed to generate correct internal re for {word_string!r}"
resultant_re = pp.Word(word_string).reString
self.assertEqual(
expected_re,
resultant_re,
msg + f"; expected {expected_re!r} got {resultant_re!r}",
)
except AssertionError:
failed.append(msg)
if failed:
print("Errors:\n{}".format("\n".join(failed)))
self.fail("failed to generate correct internal re's")
def testWordInternalReRanges(self):
import random
esc_chars = r"\^-]["
esc_chars2 = r"*+.?"
def esc_re_set_char(c):
return "\\" + c if c in esc_chars else c
def esc_re_set2_char(c):
return "\\" + c if c in esc_chars + esc_chars2 else c
for esc_char in esc_chars + esc_chars2:
# test escape char as first character in range
next_char = chr(ord(esc_char) + 1)
prev_char = chr(ord(esc_char) - 1)
esc_word = pp.Word(esc_char + next_char)
expected = rf"[{esc_re_set_char(esc_char)}{esc_re_set_char(next_char)}]+"
print(
f"Testing escape char: {esc_char} -> {esc_word} re: '{esc_word.reString}')"
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = "".join(
random.choice([esc_char, next_char]) for __ in range(16)
)
print(
f"Match '{test_string}' -> {test_string == esc_word.parse_string(test_string, parse_all=True)[0]}"
)
self.assertEqual(
test_string,
esc_word.parse_string(test_string, parse_all=True)[0],
"Word using escaped range char failed to parse",
)
# test escape char as last character in range
esc_word = pp.Word(prev_char + esc_char)
expected = rf"[{esc_re_set_char(prev_char)}{esc_re_set_char(esc_char)}]+"
print(
f"Testing escape char: {esc_char} -> {esc_word} re: '{esc_word.reString}')"
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = "".join(
random.choice([esc_char, prev_char]) for __ in range(16)
)
print(
f"Match '{test_string}' -> {test_string == esc_word.parse_string(test_string, parse_all=True)[0]}"
)
self.assertEqual(
test_string,
esc_word.parse_string(test_string, parse_all=True)[0],
"Word using escaped range char failed to parse",
)
# test escape char as only character in range
esc_word = pp.Word(esc_char, pp.alphas.upper())
expected = rf"{esc_re_set2_char(esc_char)}[A-Z]*"
print(
f"Testing escape char: {esc_char} -> {esc_word} re: '{esc_word.reString}')"
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = esc_char + "".join(
random.choice(pp.alphas.upper()) for __ in range(16)
)
print(
f"Match '{test_string}' -> {test_string == esc_word.parse_string(test_string, parse_all=True)[0]}"
)
self.assertEqual(
test_string,
esc_word.parse_string(test_string, parse_all=True)[0],
"Word using escaped range char failed to parse",
)
# test escape char as only character
esc_word = pp.Word(esc_char, pp.alphas.upper())
expected = rf"{re.escape(esc_char)}[A-Z]*"
print(
f"Testing escape char: {esc_char} -> {esc_word} re: '{esc_word.reString}')"
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = esc_char + "".join(
random.choice(pp.alphas.upper()) for __ in range(16)
)
print(
f"Match '{test_string}' -> {test_string == esc_word.parse_string(test_string, parse_all=True)[0]}"
)
self.assertEqual(
test_string,
esc_word.parse_string(test_string, parse_all=True)[0],
"Word using escaped range char failed to parse",
)
print()
def testWordWithIdentChars(self):
ppu = pp.pyparsing_unicode
latin_identifier = pp.Word(pp.identchars, pp.identbodychars)("latin*")
japanese_identifier = ppu.Japanese.identifier("japanese*")
cjk_identifier = ppu.CJK.identifier("cjk*")
greek_identifier = ppu.Greek.identifier("greek*")
cyrillic_identifier = ppu.Cyrillic.identifier("cyrillic*")
thai_identifier = ppu.Thai.identifier("thai*")
idents = (
latin_identifier
| japanese_identifier
| cjk_identifier # must follow japanese_identifier, since CJK is superset
| thai_identifier
| greek_identifier
| cyrillic_identifier
)
result = idents[...].parse_string(
"abc_100 кириллицаx_10 日本語f_300 ไทยg_600 def_200 漢字y_300 한국어_中文c_400 Ελληνικάb_500",
parse_all=True,
)
self.assertParseResultsEquals(
result,
[
"abc_100",
"кириллицаx_10",
"日本語f_300",
"ไทยg_600",
"def_200",
"漢字y_300",
"한국어_中文c_400",
"Ελληνικάb_500",
],
{
"cjk": ["한국어_中文c_400"],
"cyrillic": ["кириллицаx_10"],
"greek": ["Ελληνικάb_500"],
"japanese": ["日本語f_300", "漢字y_300"],
"latin": ["abc_100", "def_200"],
"thai": ["ไทยg_600"],
},
)
def testChainedTernaryOperator(self):
# fmt: off
TERNARY_INFIX = pp.infix_notation(
ppc.integer,
[
(("?", ":"), 3, pp.OpAssoc.LEFT),
]
)
self.assertParseAndCheckList(
TERNARY_INFIX, "1?1:0?1:0", [[1, "?", 1, ":", 0, "?", 1, ":", 0]]
)
TERNARY_INFIX = pp.infix_notation(
ppc.integer,
[
(("?", ":"), 3, pp.OpAssoc.RIGHT),
]
)
self.assertParseAndCheckList(
TERNARY_INFIX, "1?1:0?1:0", [[1, "?", 1, ":", [0, "?", 1, ":", 0]]]
)
# fmt: on
def testOneOfWithDuplicateSymbols(self):
# test making one_of with duplicate symbols
print("verify one_of handles duplicate symbols")
try:
test1 = pp.one_of("a b c d a")
except RuntimeError:
self.fail(
"still have infinite loop in one_of with duplicate symbols (string input)"
)
print("verify one_of handles duplicate symbols")
try:
test1 = pp.one_of("a a a b c d a")
except RuntimeError:
self.fail(
"still have infinite loop in one_of with duplicate symbols (string input)"
)
assert test1.pattern == "[abcd]"
print("verify one_of handles generator input")
try:
test1 = pp.one_of(c for c in "a b c d a d d d" if not c.isspace())
except RuntimeError:
self.fail(
"still have infinite loop in one_of with duplicate symbols (generator input)"
)
assert test1.pattern == "[abcd]"
print("verify one_of handles list input")
try:
test1 = pp.one_of("a b c d a".split())
except RuntimeError:
self.fail(
"still have infinite loop in one_of with duplicate symbols (list input)"
)
assert test1.pattern == "[abcd]"
print("verify one_of handles set input")
try:
test1 = pp.one_of(set("a b c d a".split()))
except RuntimeError:
self.fail(
"still have infinite loop in one_of with duplicate symbols (set input)"
)
        # a set yields its letters in arbitrary order, so sort the pattern letters before comparing
pattern_letters = test1.pattern[1:-1]
assert sorted(pattern_letters) == sorted("abcd")
def testOneOfWithEmptyList(self):
"""test one_of helper function with an empty list as input"""
tst = []
result = pp.one_of(tst)
        self.assertIsInstance(result, pp.NoMatch)
def testOneOfWithUnexpectedInput(self):
"""test one_of with an input that isn't a string or iterable"""
with self.assertRaises(
TypeError, msg="failed to warn use of integer for one_of"
):
expr = pp.one_of(6)
def testMatchFirstIteratesOverAllChoices(self):
# test MatchFirst bugfix
print("verify MatchFirst iterates properly")
results = pp.quoted_string.parse_string(
"'this is a single quoted string'", parse_all=True
)
self.assertTrue(
len(results) > 0, "MatchFirst error - not iterating over all choices"
)
def testStreamlineOfExpressionsAfterSetName(self):
bool_constant = pp.Literal("True") | "true" | "False" | "false"
self.assertEqual(
"{'True' | 'true' | 'False' | 'false'}", str(bool_constant.streamline())
)
bool_constant.set_name("bool")
self.assertEqual("bool", str(bool_constant.streamline()))
def testStreamlineOfSubexpressions(self):
# verify streamline of subexpressions
print("verify proper streamline logic")
compound = pp.Literal("A") + "B" + "C" + "D"
self.assertEqual(2, len(compound.exprs), "bad test setup")
print(compound)
compound.streamline()
print(compound)
self.assertEqual(4, len(compound.exprs), "streamline not working")
def testOptionalWithResultsNameAndNoMatch(self):
# test for Optional with results name and no match
print("verify Optional's do not cause match failure if have results name")
testGrammar = pp.Literal("A") + pp.Optional("B")("gotB") + pp.Literal("C")
try:
testGrammar.parse_string("ABC", parse_all=True)
testGrammar.parse_string("AC", parse_all=True)
except pp.ParseException as pe:
print(pe.pstr, "->", pe)
self.fail(f"error in Optional matching of string {pe.pstr}")
def testReturnOfFurthestException(self):
# test return of furthest exception
testGrammar = (
pp.Literal("A") | (pp.Literal("B") + pp.Literal("C")) | pp.Literal("E")
)
try:
testGrammar.parse_string("BC", parse_all=True)
testGrammar.parse_string("BD", parse_all=True)
except pp.ParseException as pe:
print(pe.pstr, "->", pe)
self.assertEqual("BD", pe.pstr, "wrong test string failed to parse")
self.assertEqual(
1, pe.loc, "error in Optional matching, pe.loc=" + str(pe.loc)
)
self.assertTrue(
"found 'D'" in str(pe), "wrong alternative raised exception"
)
def testValidateCorrectlyDetectsInvalidLeftRecursion(self):
# test validate
print("verify behavior of validate()")
if IRON_PYTHON_ENV:
print("disable this test under IronPython")
return
def testValidation(grmr, gnam, isValid):
try:
grmr.streamline()
with self.assertWarns(
DeprecationWarning, msg="failed to warn validate() is deprecated"
):
grmr.validate()
self.assertTrue(isValid, "validate() accepted invalid grammar " + gnam)
except pp.RecursiveGrammarException as rge:
print(grmr)
print(rge)
self.assertFalse(isValid, "validate() rejected valid grammar " + gnam)
fwd = pp.Forward()
g1 = pp.OneOrMore((pp.Literal("A") + "B" + "C") | fwd)
g2 = ("C" + g1)[...]
fwd <<= pp.Group(g2)
testValidation(fwd, "fwd", isValid=True)
fwd2 = pp.Forward()
fwd2 <<= pp.Group("A" | fwd2)
testValidation(fwd2, "fwd2", isValid=False)
fwd3 = pp.Forward()
fwd3 <<= pp.Optional("A") + fwd3
testValidation(fwd3, "fwd3", isValid=False)
def testGetNameBehavior(self):
# test get_name
print("verify behavior of get_name()")
aaa = pp.Group(pp.Word("a")("A"))
bbb = pp.Group(pp.Word("b")("B"))
ccc = pp.Group(":" + pp.Word("c")("C"))
g1 = "XXX" + (aaa | bbb | ccc)[...]
teststring = "XXX b bb a bbb bbbb aa bbbbb :c bbbbbb aaa"
names = []
print(g1.parse_string(teststring, parse_all=True).dump())
for t in g1.parse_string(teststring, parse_all=True):
print(t, repr(t))
try:
names.append(t[0].get_name())
except Exception:
try:
names.append(t.get_name())
except Exception:
names.append(None)
print(teststring)
print(names)
self.assertEqual(
[None, "B", "B", "A", "B", "B", "A", "B", None, "B", "A"],
names,
"failure in getting names for tokens",
)
IF, AND, BUT = map(pp.Keyword, "if and but".split())
ident = ~(IF | AND | BUT) + pp.Word(pp.alphas)("non-key")
scanner = pp.OneOrMore(IF | AND | BUT | ident)
def getNameTester(s, l, t):
print(t, t.get_name())
ident.add_parse_action(getNameTester)
scanner.parse_string("lsjd sldkjf IF Saslkj AND lsdjf", parse_all=True)
# test ParseResults.get() method
print("verify behavior of ParseResults.get()")
# use sum() to merge separate groups into single ParseResults
res = sum(g1.parse_string(teststring, parse_all=True)[1:])
print(res.dump())
print(res.get("A", "A not found"))
print(res.get("D", "!D"))
self.assertEqual(
"aaa", res.get("A", "A not found"), "get on existing key failed"
)
self.assertEqual("!D", res.get("D", "!D"), "get on missing key failed")
def testOptionalBeyondEndOfString(self):
print("verify handling of Optional's beyond the end of string")
testGrammar = "A" + pp.Optional("B") + pp.Optional("C") + pp.Optional("D")
testGrammar.parse_string("A", parse_all=True)
testGrammar.parse_string("AB", parse_all=True)
def testCreateLiteralWithEmptyString(self):
# test creating Literal with empty string
print('verify that Literal("") is optimized to Empty()')
e = pp.Literal("")
self.assertIsInstance(e, pp.Empty)
def testLineMethodSpecialCaseAtStart(self):
# test line() behavior when starting at 0 and the opening line is an \n
print("verify correct line() behavior when first line is empty string")
self.assertEqual(
"",
pp.line(0, "\nabc\ndef\n"),
"Error in line() with empty first line in text",
)
txt = "\nabc\ndef\n"
results = [pp.line(i, txt) for i in range(len(txt))]
self.assertEqual(
["", "abc", "abc", "abc", "abc", "def", "def", "def", "def"],
results,
"Error in line() with empty first line in text",
)
txt = "abc\ndef\n"
results = [pp.line(i, txt) for i in range(len(txt))]
self.assertEqual(
["abc", "abc", "abc", "abc", "def", "def", "def", "def"],
results,
"Error in line() with non-empty first line in text",
)
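    # Editor's sketch (hypothetical method, not in the original suite): line()
    # pairs with the 1-based lineno() and col() helpers to report positions
    # within the same source string.
    def testLineColLinenoSketch(self):
        txt = "abc\ndef"
        loc = txt.index("e")
        self.assertEqual("def", pp.line(loc, txt))
        self.assertEqual(2, pp.lineno(loc, txt))
        self.assertEqual(2, pp.col(loc, txt))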
def testRepeatedTokensWhenPackratting(self):
# test bugfix with repeated tokens when packrat parsing enabled
print("verify behavior with repeated tokens when packrat parsing is enabled")
a = pp.Literal("a")
b = pp.Literal("b")
c = pp.Literal("c")
abb = a + b + b
abc = a + b + c
aba = a + b + a
grammar = abb | abc | aba
self.assertEqual(
"aba",
"".join(grammar.parse_string("aba", parse_all=True)),
"Packrat ABA failure!",
)
def testSetResultsNameWithOneOrMoreAndZeroOrMore(self):
print("verify behavior of set_results_name with OneOrMore and ZeroOrMore")
stmt = pp.Keyword("test")
print(stmt[...]("tests").parse_string("test test", parse_all=True).tests)
print(stmt[1, ...]("tests").parse_string("test test", parse_all=True).tests)
print(
pp.Optional(stmt[1, ...]("tests"))
.parse_string("test test", parse_all=True)
.tests
)
print(
pp.Optional(stmt[1, ...])("tests")
.parse_string("test test", parse_all=True)
.tests
)
print(
pp.Optional(pp.DelimitedList(stmt))("tests")
.parse_string("test,test", parse_all=True)
.tests
)
self.assertEqual(
2,
len(stmt[...]("tests").parse_string("test test", parse_all=True).tests),
"ZeroOrMore failure with set_results_name",
)
self.assertEqual(
2,
len(stmt[1, ...]("tests").parse_string("test test", parse_all=True).tests),
"OneOrMore failure with set_results_name",
)
self.assertEqual(
2,
len(
pp.Optional(stmt[1, ...]("tests"))
.parse_string("test test", parse_all=True)
.tests
),
"OneOrMore failure with set_results_name",
)
self.assertEqual(
2,
len(
pp.Optional(pp.DelimitedList(stmt))("tests")
.parse_string("test,test", parse_all=True)
.tests
),
"DelimitedList failure with set_results_name",
)
self.assertEqual(
2,
len((stmt * 2)("tests").parse_string("test test", parse_all=True).tests),
"multiplied(1) failure with set_results_name",
)
self.assertEqual(
2,
len(stmt[..., 2]("tests").parse_string("test test", parse_all=True).tests),
"multiplied(2) failure with set_results_name",
)
self.assertEqual(
2,
len(stmt[1, ...]("tests").parse_string("test test", parse_all=True).tests),
"multiplied(3) failure with set_results_name",
)
self.assertEqual(
2,
len(stmt[2, ...]("tests").parse_string("test test", parse_all=True).tests),
"multiplied(3) failure with set_results_name",
)
def testParseResultsReprWithResultsNames(self):
word = pp.Word(pp.printables)("word")
res = word[...].parse_string("test blub", parse_all=True)
print(repr(res))
print(res["word"])
print(res.as_dict())
self.assertEqual(
"ParseResults(['test', 'blub'], {'word': 'blub'})",
repr(res),
"incorrect repr for ParseResults with list_all_matches=False",
)
word = pp.Word(pp.printables)("word*")
res = word[...].parse_string("test blub", parse_all=True)
print(repr(res))
print(res["word"])
print(res.as_dict())
self.assertEqual(
"ParseResults(['test', 'blub'], {'word': ['test', 'blub']})",
repr(res),
"incorrect repr for ParseResults with list_all_matches=True",
)
def testWarnUsingLshiftForward(self):
print(
"verify that using '<<' operator with a Forward raises a warning if there is a dangling '|' operator"
)
fwd = pp.Forward()
print("unsafe << and |, but diag not enabled, should not warn")
fwd << pp.Word("a") | pp.Word("b")
pp.enable_diag(pp.Diagnostics.warn_on_match_first_with_lshift_operator)
with self.assertWarns(
UserWarning, msg="failed to warn of using << and | operators"
):
fwd = pp.Forward()
print("unsafe << and |, should warn")
fwd << pp.Word("a") | pp.Word("b")
with self.assertWarns(
UserWarning,
msg="failed to warn of using << and | operators (within lambda)",
):
fwd = pp.Forward()
print("unsafe << and |, should warn")
fwd_fn = lambda expr1, expr2: fwd << expr1 | expr2
fwd_fn(pp.Word("a"), pp.Word("b"))
fwd = pp.Forward()
print("safe <<= and |, should not warn")
fwd <<= pp.Word("a") | pp.Word("b")
c = fwd | pp.Word("c")
print("safe << and (|), should not warn")
with self.assertDoesNotWarn(
"warning raised on safe use of << with Forward and MatchFirst"
):
fwd = pp.Forward()
fwd << (pp.Word("a") | pp.Word("b"))
c = fwd | pp.Word("c")
def testParseExpressionsWithRegex(self):
from itertools import product
match_empty_regex = pp.Regex(r"[a-z]*")
match_nonempty_regex = pp.Regex(r"[a-z]+")
parser_classes = pp.ParseExpression.__subclasses__()
test_string = "abc def"
expected = ["abc"]
for expr, cls in product(
(match_nonempty_regex, match_empty_regex), parser_classes
):
print(expr, cls)
parser = cls([expr])
parsed_result = parser.parse_string(test_string, parse_all=False)
print(parsed_result.dump())
self.assertParseResultsEquals(parsed_result, expected)
for expr, cls in product(
(match_nonempty_regex, match_empty_regex), (pp.MatchFirst, pp.Or)
):
parser = cls([expr, expr])
print(parser)
parsed_result = parser.parse_string(test_string, parse_all=False)
print(parsed_result.dump())
self.assertParseResultsEquals(parsed_result, expected)
def testAssertParseAndCheckDict(self):
"""test assertParseAndCheckDict in test framework"""
expr = pp.Word(pp.alphas)("item") + pp.Word(pp.nums)("qty")
self.assertParseAndCheckDict(
expr, "balloon 25", {"item": "balloon", "qty": "25"}
)
exprWithInt = pp.Word(pp.alphas)("item") + ppc.integer("qty")
self.assertParseAndCheckDict(
exprWithInt, "rucksack 49", {"item": "rucksack", "qty": 49}
)
def testOnlyOnce(self):
"""test class OnlyOnce and its reset method"""
# use a parse action to compute the sum of the parsed integers,
# and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
pa = pp.OnlyOnce(append_sum)
expr = pp.OneOrMore(pp.Word(pp.nums)).add_parse_action(pa)
result = expr.parse_string("0 123 321", parse_all=True)
print(result.dump())
expected = ["0", "123", "321", 444]
self.assertParseResultsEquals(
result, expected, msg="issue with OnlyOnce first call"
)
with self.assertRaisesParseException(
msg="failed to raise exception calling OnlyOnce more than once"
):
result2 = expr.parse_string("1 2 3 4 5", parse_all=True)
pa.reset()
result = expr.parse_string("100 200 300")
print(result.dump())
expected = ["100", "200", "300", 600]
self.assertParseResultsEquals(
result, expected, msg="issue with OnlyOnce after reset"
)
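    # Editor's sketch (hypothetical method, not in the original suite): OnlyOnce
    # wraps any callable; reset() must be called between parses or the second
    # parse raises an exception.
    def testOnlyOnceResetSketch(self):
        seen = []
        pa = pp.OnlyOnce(lambda t: seen.append(t[0]))
        expr = pp.Word(pp.alphas).add_parse_action(pa)
        expr.parse_string("first", parse_all=True)
        pa.reset()
        expr.parse_string("second", parse_all=True)
        self.assertEqual(["first", "second"], seen)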
def testGoToColumn(self):
"""tests for GoToColumn class"""
dateExpr = pp.Regex(r"\d\d(\.\d\d){2}")("date")
numExpr = ppc.number("num")
sample = """\
date Not Important value NotImportant2
11.11.13 | useless . useless,21 useless 2 | 14.21 | asmdakldm
21.12.12 | fmpaosmfpoamsp 4 | 41 | ajfa9si90""".splitlines()
# Column number finds match
patt = dateExpr + pp.GoToColumn(70).ignore("|") + numExpr + pp.rest_of_line
infile = iter(sample)
next(infile)
expecteds = [["11.11.13", 14.21], ["21.12.12", 41]]
for line, expected in zip(infile, expecteds):
result = patt.parse_string(line, parse_all=True)
print(result)
self.assertEqual(
expected, [result.date, result.num], msg="issue with GoToColumn"
)
# Column number does NOT match
patt = dateExpr("date") + pp.GoToColumn(30) + numExpr + pp.rest_of_line
infile = iter(sample)
next(infile)
for line in infile:
with self.assertRaisesParseException(
msg="issue with GoToColumn not finding match"
):
result = patt.parse_string(line, parse_all=True)
def testExceptionExplainVariations(self):
class Modifier:
def modify_upper(self, tokens):
tokens[:] = map(str.upper, tokens)
modder = Modifier()
# force an exception in the attached parse action
# integer has a parse action to convert to an int;
# this parse action should fail with a TypeError, since
# str.upper expects a str argument, not an int
grammar = ppc.integer().add_parse_action(modder.modify_upper)
self_testcase_name = "tests.test_unit." + type(self).__name__
try:
grammar.parse_string("1000", parse_all=True)
except Exception as e:
# extract the exception explanation
explain_str = ParseException.explain_exception(e)
print(explain_str)
explain_str_lines = explain_str.splitlines()
expected = [
self_testcase_name,
"pyparsing.core.Word - integer",
"tests.test_unit.Modifier",
"pyparsing.results.ParseResults",
]
# verify the list of names shown in the explain "stack"
self.assertEqual(
expected, explain_str_lines[-len(expected) :], msg="invalid explain str"
)
# check type of raised exception matches explain output
# (actual exception text varies by Python version, and even
# by how the exception is raised, so we can only check the
# type name)
exception_line = explain_str_lines[-(len(expected) + 1)]
self.assertTrue(
exception_line.startswith("TypeError:"),
msg=f"unexpected exception line ({exception_line!r})",
)
def testExceptionMessageCustomization(self):
with resetting(pp.ParseBaseException, "formatted_message"):
def custom_exception_message(exc) -> str:
found_phrase = f", found {exc.found}" if exc.found else ""
return f"{exc.lineno}:{exc.column} {exc.msg}{found_phrase}"
pp.ParseBaseException.formatted_message = custom_exception_message
try:
pp.Word(pp.nums).parse_string("ABC")
except ParseException as pe:
pe_msg = str(pe)
else:
pe_msg = ""
self.assertEqual("1:1 Expected W:(0-9), found 'ABC'", pe_msg)
def testForwardReferenceException(self):
token = pp.Forward()
num = pp.Word(pp.nums)
num.set_name("num")
text = pp.Word(pp.alphas)
text.set_name("text")
fail = pp.Regex(r"\\[A-Za-z]*")("name")
def parse_fail(s, loc, toks):
raise pp.ParseFatalException(s, loc, f"Unknown symbol: {toks['name']}")
fail.set_parse_action(parse_fail)
token <<= num | text | fail
# If no name is given, do not intercept error messages
with self.assertRaises(pp.ParseFatalException, msg="Unknown symbol: \\fail"):
token.parse_string("\\fail")
# If name is given, do intercept error messages
token.set_name("token")
with self.assertRaises(pp.ParseFatalException, msg="Expected token, found.*"):
token.parse_string("\\fail")
def testForwardExceptionText(self):
wd = pp.Word(pp.alphas)
ff = pp.Forward().set_name("fffff!")
ff <<= wd + pp.Opt(ff)
with self.assertRaises(pp.ParseFatalException, msg="no numbers!"):
try:
ff.parse_string("123")
except pp.ParseException as pe:
raise pp.ParseSyntaxException("no numbers! just alphas!") from pe
with self.assertRaises(pp.ParseException, msg="Expected W:(A-Za-z)"):
ff2 = pp.Forward()
ff2 <<= wd
ff2.parse_string("123")
def testForwardExceptionText2(self):
"""
Test various expressions for error messages, under conditions in wrapped ParserElements
"""
v = "(omit closing paren"
w = "('omit closing quote)"
for s, expr, expected in (
(v, pp.nested_expr(), "Expected ')'"),
(v, pp.Combine(pp.nested_expr(), adjacent=False), "Expected ')'"),
(
v,
pp.QuotedString("(", end_quote_char=")"),
"Expected quoted string, starting with ( ending with ), found '('",
),
(w, pp.nested_expr(content=pp.sgl_quoted_string), "Expected ')'"),
("", pp.nested_expr(), ""),
("", pp.Word("A"), ""),
):
print(repr(s))
print(expr)
with self.subTest("parse expr", expr=expr, s=s, expected=expected):
with self.assertRaisesParseException(expected_msg=expected) as ctx:
expr.parse_string(s, parse_all=True)
print(ctx.exception)
with self.subTest("parse expr[1, ...]", expr=expr, s=s, expected=expected):
with self.assertRaisesParseException(expected_msg=expected) as ctx:
expr[1, ...].parse_string(s, parse_all=True)
print(ctx.exception)
with self.subTest(
"parse DelimitedList(expr)", expr=expr, s=s, expected=expected
):
with self.assertRaisesParseException(expected_msg=expected) as ctx:
pp.DelimitedList(expr).parse_string(s, parse_all=True)
print(ctx.exception)
print()
def testMiscellaneousExceptionBits(self):
pp.ParserElement.verbose_stacktrace = True
self_testcase_name = "tests.test_unit." + type(self).__name__
# force a parsing exception - match an integer against "ABC"
try:
pp.Word(pp.nums).parse_string("ABC", parse_all=True)
except pp.ParseException as pe:
expected_str = "Expected W:(0-9), found 'ABC' (at char 0), (line:1, col:1)"
self.assertEqual(expected_str, str(pe), "invalid ParseException str")
self.assertEqual(expected_str, repr(pe), "invalid ParseException repr")
self.assertEqual(
">!<ABC", pe.mark_input_line(), "invalid default mark input line"
)
self.assertEqual(
"ABC", pe.mark_input_line(""), "invalid mark input line with '' marker"
)
# test explain using depth=None, 0, 1
depth_none_explain_str = pe.explain(depth=None)
depth_0_explain_str = pe.explain(depth=0)
depth_1_explain_str = pe.explain(depth=1)
print(depth_none_explain_str)
print()
print(depth_0_explain_str)
print()
print(depth_1_explain_str)
expr_name = "pyparsing.core.Word - W:(0-9)"
for expected_function in [self_testcase_name, expr_name]:
self.assertTrue(
expected_function in depth_none_explain_str,
f"{expected_function!r} not found in ParseException.explain()",
)
self.assertFalse(
expected_function in depth_0_explain_str,
f"{expected_function!r} found in ParseException.explain(depth=0)",
)
self.assertTrue(
expr_name in depth_1_explain_str,
f"{expected_function!r} not found in ParseException.explain()",
)
self.assertFalse(
self_testcase_name in depth_1_explain_str,
f"{expected_function!r} not found in ParseException.explain()",
)
def testExpressionDefaultStrings(self):
expr = pp.Word(pp.nums)
print(expr)
self.assertEqual("W:(0-9)", repr(expr))
expr = pp.Word(pp.nums, exact=3)
print(expr)
self.assertEqual("W:(0-9){3}", repr(expr))
expr = pp.Word(pp.nums, min=2)
print(expr)
self.assertEqual("W:(0-9){2,...}", repr(expr))
expr = pp.Word(pp.nums, max=3)
print(expr)
self.assertEqual("W:(0-9){1,3}", repr(expr))
expr = pp.Word(pp.nums, min=2, max=3)
print(expr)
self.assertEqual("W:(0-9){2,3}", repr(expr))
expr = pp.Char(pp.nums)
print(expr)
self.assertEqual("(0-9)", repr(expr))
def testEmptyExpressionsAreHandledProperly(self):
try:
from pyparsing.diagram import to_railroad
except ModuleNotFoundError as mnfe:
print("Failed 'from pyparsing.diagram import to_railroad'"
f"\n {type(mnfe).__name__}: {mnfe}")
if mnfe.__cause__:
print(f"\n {type(mnfe.__cause__).__name__}: {mnfe.__cause__}")
self.skipTest("Failed 'from pyparsing.diagram import to_railroad'")
for cls in (pp.And, pp.Or, pp.MatchFirst, pp.Each):
print("testing empty", cls.__name__)
expr = cls([])
expr.streamline()
to_railroad(expr)
def testForwardsDoProperStreamlining(self):
wd = pp.Word(pp.alphas)
w3 = wd + wd + wd
# before streamlining, w3 is {{W:(A-Za-z) W:(A-Za-z)} W:(A-Za-z)}
self.assertIsInstance(w3.exprs[0], pp.And)
self.assertEqual(len(w3.exprs), 2)
ff = pp.Forward()
ff <<= w3 + pp.Opt(ff)
# before streamlining, ff is {{{W:(A-Za-z) W:(A-Za-z)} W:(A-Za-z)} [Forward: None]}
self.assertEqual(len(ff.expr.exprs), 2)
ff.streamline()
# after streamlining:
# w3 is {W:(A-Za-z) W:(A-Za-z) W:(A-Za-z)}
# ff is {W:(A-Za-z) W:(A-Za-z) W:(A-Za-z) [Forward: None]}
self.assertEqual(len(ff.expr.exprs), 4)
self.assertEqual(len(w3.exprs), 3)
test_exception_messages_tests = (
(pp.Word(pp.alphas), "123", "Expected W:(A-Za-z), found '123'"),
(pp.Word(pp.alphas).set_name("word"), "123", "Expected word, found '123'"),
(
pp.Group(pp.Word(pp.alphas).set_name("word")),
"123",
"Expected word, found '123'",
),
(
pp.OneOrMore(pp.Word(pp.alphas).set_name("word")),
"123",
"Expected word, found '123'",
),
(
pp.DelimitedList(pp.Word(pp.alphas).set_name("word")),
"123",
"Expected word, found '123'",
),
(
pp.Suppress(pp.Word(pp.alphas).set_name("word")),
"123",
"Expected word, found '123'",
),
(
pp.Forward() << pp.Word(pp.alphas).set_name("word"),
"123",
"Expected word, found '123'",
),
(
pp.Forward() << pp.Word(pp.alphas),
"123",
"Expected W:(A-Za-z), found '123'",
),
(
pp.Group(pp.Word(pp.alphas)),
"123",
"Expected W:(A-Za-z), found '123'",
),
(
"prefix" + (pp.Regex("a").set_name("a") | pp.Regex("b").set_name("b")),
"prefixc",
"Expected {a | b}, found 'c'",
),
(
"prefix" + (pp.Regex("a").set_name("a") | pp.Regex("b").set_name("b")),
"prefix c",
"Expected {a | b}, found 'c'",
),
(
"prefix" + (pp.Regex("a").set_name("a") ^ pp.Regex("b").set_name("b")),
"prefixc",
"Expected {a ^ b}, found 'c'",
),
(
"prefix" + (pp.Regex("a").set_name("a") ^ pp.Regex("b").set_name("b")),
"prefix c",
"Expected {a ^ b}, found 'c'",
),
)
def test_exception_messages(self, tests=test_exception_messages_tests):
for expr, input_str, expected_msg in tests:
with self.subTest(expr=expr, input_str=input_str):
with self.assertRaisesParseException(expected_msg=expected_msg):
expr.parse_string(input_str)
def test_exception_messages_with_exception_subclass(self):
class TooManyRepsException(pp.ParseFatalException):
pass
@pp.trace_parse_action
def no_more_than_3(t):
if len(t) > 3:
raise TooManyRepsException(f"{len(t)} is too many, only 3 allowed")
# parse an int followed by no more than 3 words
parser = pp.Word(pp.nums) + pp.Group(
pp.Word(pp.alphas)[...].add_parse_action(no_more_than_3)
)
# should succeed
result = parser.parse_string("1000 abc def ghi")
print(result.dump())
# should raise exception with local exception message
with self.assertRaisesParseException(
exc_type=ParseFatalException,
expected_msg="4 is too many, only 3 allowed",
msg="wrong exception message",
) as pe_context:
result = parser.parse_string("2000 abc def ghi jkl")
print(pe_context.exception)
def test_pep8_synonyms(self):
"""
Test that staticmethods wrapped by replaced_by_pep8 wrapper are properly
callable as staticmethods.
"""
def run_subtest(fn_name, expr=None, args=""):
bool_expr = pp.one_of("true false", as_keyword=True)
if expr is None:
expr = "bool_expr"
# try calling a ParserElement staticmethod via a ParserElement instance
with self.subTest(fn_name=fn_name):
exec(f"{expr}.{fn_name}({args})", globals(), locals())
# access staticmethod synonyms using a ParserElement
parser_element_staticmethod_names = """
enable_packrat disable_memoization enable_left_recursion reset_cache
""".split()
if not (
pp.ParserElement._packratEnabled or pp.ParserElement._left_recursion_enabled
):
for name in parser_element_staticmethod_names:
run_subtest(name)
pp.ParserElement.disable_memoization()
run_subtest("set_default_whitespace_chars", args="' '")
run_subtest("inline_literals_using", args="pp.Suppress")
run_subtest(
"set_default_keyword_chars", expr="pp.Keyword('START')", args="'abcde'"
)
pass
|
Test02_WithoutPackrat
|
python
|
paramiko__paramiko
|
paramiko/ssh_exception.py
|
{
"start": 5054,
"end": 6773
}
|
class ____(socket.error):
"""
Multiple connection attempts were made and no families succeeded.
This exception class wraps multiple "real" underlying connection errors,
all of which represent failed connection attempts. Because these errors are
not guaranteed to all be of the same error type (i.e. different errno,
`socket.error` subclass, message, etc) we expose a single unified error
message and a ``None`` errno so that instances of this class match most
normal handling of `socket.error` objects.
To see the wrapped exception objects, access the ``errors`` attribute.
``errors`` is a dict whose keys are address tuples (e.g. ``('127.0.0.1',
22)``) and whose values are the exception encountered trying to connect to
that address.
It is implied/assumed that all the errors given to a single instance of
this class are from connecting to the same hostname + port (and thus that
the differences are in the resolution of the hostname - e.g. IPv4 vs v6).
.. versionadded:: 1.16
"""
def __init__(self, errors):
"""
:param dict errors:
The errors dict to store, as described by class docstring.
"""
addrs = sorted(errors.keys())
body = ", ".join([x[0] for x in addrs[:-1]])
tail = addrs[-1][0]
if body:
msg = "Unable to connect to port {0} on {1} or {2}"
else:
msg = "Unable to connect to port {0} on {2}"
super().__init__(
None, msg.format(addrs[0][1], body, tail) # stand-in for errno
)
self.errors = errors
def __reduce__(self):
return (self.__class__, (self.errors,))
|
NoValidConnectionsError
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/conv_test.py
|
{
"start": 5386,
"end": 7514
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, stride, N, H, W, G, pad, device):
self.inputs = {"input": torch.rand(N, IC, H, W, device=device)}
# Use 1 as kernel for pointwise convolution
self.conv2d = nn.Conv2d(IC, OC, 1, stride=stride, groups=G, padding=pad).to(
device=device
)
self.set_module_name("Conv2dPointwise")
def forward(self, input):
return self.conv2d(input)
def get_memory_traffic_bytes(self):
"""Calculate memory traffic for Conv2dPointwise: read(input + weight) + write(output)"""
input_tensor = self.inputs["input"]
# Run forward to get output shape
with torch.no_grad():
output = self.conv2d(input_tensor)
bytes_per_element = input_tensor.element_size()
# Input: N × IC × H × W
input_elements = input_tensor.numel()
# Weight: OC × (IC/G) × 1 × 1
weight_elements = self.conv2d.weight.numel()
# Output: N × OC × H_out × W_out
output_elements = output.numel()
total_elements = input_elements + weight_elements + output_elements
return total_elements * bytes_per_element
op_bench.generate_pt_test(
configs.conv_2d_configs_short + configs.conv_2d_configs_long, Conv2dBenchmark
)
op_bench.generate_pt_test(
configs.conv_2d_configs_short + configs.conv_2d_configs_long,
ConvTranspose2dBenchmark,
)
op_bench.generate_pt_test(
configs.conv_2d_pw_configs_short + configs.conv_2d_pw_configs_long,
Conv2dPointwiseBenchmark,
)
op_bench.generate_pt_gradient_test(
configs.remove_cpu(configs.conv_2d_configs_short + configs.conv_2d_configs_long),
Conv2dBenchmark,
)
op_bench.generate_pt_gradient_test(
configs.remove_cpu(configs.conv_2d_configs_short + configs.conv_2d_configs_long),
ConvTranspose2dBenchmark,
)
op_bench.generate_pt_gradient_test(
configs.remove_cpu(
configs.conv_2d_pw_configs_short + configs.conv_2d_pw_configs_long
),
Conv2dPointwiseBenchmark,
)
"""
Microbenchmarks for Conv3d and ConvTranspose3d operators.
"""
|
Conv2dPointwiseBenchmark
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 579643,
"end": 580027
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Discussion", graphql_name="node")
"""The item at the end of the edge."""
|
DiscussionEdge
|
python
|
huggingface__transformers
|
tests/models/sam2/test_modeling_sam2.py
|
{
"start": 1524,
"end": 4630
}
|
class ____:
def __init__(
self,
parent,
hidden_size=12,
embed_dim_per_stage=[12, 24, 48, 96],
num_attention_heads_per_stage=[1, 2, 4, 8],
num_channels=3,
image_size=128,
patch_kernel_size=7,
patch_stride=4,
patch_padding=3,
batch_size=2,
blocks_per_stage=[1, 2, 7, 2],
backbone_channel_list=[96, 48, 24, 12],
backbone_feature_sizes=[[32, 32], [16, 16], [8, 8]],
fpn_hidden_size=32,
is_training=False,
):
self.parent = parent
self.hidden_size = hidden_size
self.image_size = image_size
self.num_channels = num_channels
self.patch_kernel_size = patch_kernel_size
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.batch_size = batch_size
self.is_training = is_training
self.blocks_per_stage = blocks_per_stage
self.embed_dim_per_stage = embed_dim_per_stage
self.num_attention_heads_per_stage = num_attention_heads_per_stage
self.backbone_channel_list = backbone_channel_list
self.backbone_feature_sizes = backbone_feature_sizes
self.fpn_hidden_size = fpn_hidden_size
def get_config(self):
backbone_config = Sam2HieraDetConfig(
hidden_size=self.hidden_size,
num_channels=self.num_channels,
image_size=self.image_size,
patch_stride=self.patch_stride,
patch_kernel_size=self.patch_kernel_size,
patch_padding=self.patch_padding,
blocks_per_stage=self.blocks_per_stage,
embed_dim_per_stage=self.embed_dim_per_stage,
num_attention_heads_per_stage=self.num_attention_heads_per_stage,
)
return Sam2VisionConfig(
backbone_config=backbone_config,
backbone_channel_list=self.backbone_channel_list,
backbone_feature_sizes=self.backbone_feature_sizes,
fpn_hidden_size=self.fpn_hidden_size,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def create_and_check_model(self, config, pixel_values):
model = Sam2VisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
output_size = self.image_size // self.patch_stride // (2 * len(self.blocks_per_stage))
output_channels = self.hidden_size * 2 * len(self.blocks_per_stage)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, output_size, output_size, output_channels)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
|
Sam2VisionModelTester
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/ddl.py
|
{
"start": 3010,
"end": 3392
}
|
class ____(Protocol):
def __call__(
self,
ddl: BaseDDLElement,
target: Union[SchemaItem, str],
bind: Optional[Connection],
tables: Optional[List[Table]] = None,
state: Optional[Any] = None,
*,
dialect: Dialect,
compiler: Optional[DDLCompiler] = ...,
checkfirst: bool,
) -> bool: ...
|
DDLIfCallable
|
python
|
scipy__scipy
|
scipy/fft/_pocketfft/tests/test_basic.py
|
{
"start": 28853,
"end": 29136
}
|
class ____:
def __init__(self, data):
self._data = data
def __array__(self, dtype=None, copy=None):
return self._data
# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
# relied upon by users except for overwrite_x = False
|
FakeArray2
|
python
|
google__pytype
|
pytype/tests/test_operators3.py
|
{
"start": 501,
"end": 672
}
|
class ____(test_base.BaseTest, test_utils.OperatorsTestMixin):
"""Tests for reverse operators."""
def test_div(self):
self.check_reverse("truediv", "/")
|
ReverseTest
|